/* (removed: file-size banner and embedded line-number gutter left over from text extraction) */
  1. /*
  2. * OpenRISC head.S
  3. *
  4. * Linux architectural port borrowing liberally from similar works of
  5. * others. All original copyrights apply as per the original source
  6. * declaration.
  7. *
  8. * Modifications for the OpenRISC architecture:
  9. * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
  10. * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
  11. *
  12. * This program is free software; you can redistribute it and/or
  13. * modify it under the terms of the GNU General Public License
  14. * as published by the Free Software Foundation; either version
  15. * 2 of the License, or (at your option) any later version.
  16. */
  17. #include <linux/linkage.h>
  18. #include <linux/threads.h>
  19. #include <linux/errno.h>
  20. #include <linux/init.h>
  21. #include <linux/serial_reg.h>
  22. #include <asm/processor.h>
  23. #include <asm/page.h>
  24. #include <asm/mmu.h>
  25. #include <asm/pgtable.h>
  26. #include <asm/cache.h>
  27. #include <asm/spr_defs.h>
  28. #include <asm/asm-offsets.h>
  29. #include <linux/of_fdt.h>
/* Convert the virtual address in rs to a physical address in rd by
 * subtracting KERNELBASE.  Safe to use while the MMU is disabled. */
#define tophys(rd,rs)				\
	l.movhi	rd,hi(-KERNELBASE)		;\
	l.add	rd,rd,rs

/* Zero a general-purpose register (r0 always reads as zero). */
#define CLEAR_GPR(gpr)				\
	l.or	gpr,r0,r0

/* Load the full 32-bit address of 'symbol' into gpr (movhi/ori pair). */
#define LOAD_SYMBOL_2_GPR(gpr,symbol)		\
	l.movhi	gpr,hi(symbol)			;\
	l.ori	gpr,gpr,lo(symbol)

#define UART_BASE_ADD      0x90000000

/* SR value used while handling exceptions: supervisor mode, caches and
 * MMUs on, interrupts and timer exceptions off. */
#define EXCEPTION_SR  (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM)
/* SR value used for syscall handling: as above, plus external-interrupt
 * (IEE) and tick-timer (TEE) exceptions enabled. */
#define SYSCALL_SR  (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM)
/* ============================================[ tmp store locations ]=== */

/*
 * emergency_print temporary stores
 *
 * Fixed scratch slots in low physical memory (offsets 0x20-0x34 from
 * address 0) used to preserve r4-r9 around the emergency print helpers.
 */
#define EMERGENCY_PRINT_STORE_GPR4	l.sw    0x20(r0),r4
#define EMERGENCY_PRINT_LOAD_GPR4	l.lwz   r4,0x20(r0)

#define EMERGENCY_PRINT_STORE_GPR5	l.sw    0x24(r0),r5
#define EMERGENCY_PRINT_LOAD_GPR5	l.lwz   r5,0x24(r0)

#define EMERGENCY_PRINT_STORE_GPR6	l.sw    0x28(r0),r6
#define EMERGENCY_PRINT_LOAD_GPR6	l.lwz   r6,0x28(r0)

#define EMERGENCY_PRINT_STORE_GPR7	l.sw    0x2c(r0),r7
#define EMERGENCY_PRINT_LOAD_GPR7	l.lwz   r7,0x2c(r0)

#define EMERGENCY_PRINT_STORE_GPR8	l.sw    0x30(r0),r8
#define EMERGENCY_PRINT_LOAD_GPR8	l.lwz   r8,0x30(r0)

#define EMERGENCY_PRINT_STORE_GPR9	l.sw    0x34(r0),r9
#define EMERGENCY_PRINT_LOAD_GPR9	l.lwz   r9,0x34(r0)

/*
 * TLB miss handlers temporary stores
 *
 * Scratch slots (offsets 0x10, 0x64-0x74) used by the boot-time TLB
 * miss handlers to save the registers they clobber.
 */
#define EXCEPTION_STORE_GPR9		l.sw    0x10(r0),r9
#define EXCEPTION_LOAD_GPR9		l.lwz   r9,0x10(r0)

#define EXCEPTION_STORE_GPR2		l.sw    0x64(r0),r2
#define EXCEPTION_LOAD_GPR2		l.lwz   r2,0x64(r0)

#define EXCEPTION_STORE_GPR3		l.sw    0x68(r0),r3
#define EXCEPTION_LOAD_GPR3		l.lwz   r3,0x68(r0)

#define EXCEPTION_STORE_GPR4		l.sw    0x6c(r0),r4
#define EXCEPTION_LOAD_GPR4		l.lwz   r4,0x6c(r0)

#define EXCEPTION_STORE_GPR5		l.sw    0x70(r0),r5
#define EXCEPTION_LOAD_GPR5		l.lwz   r5,0x70(r0)

#define EXCEPTION_STORE_GPR6		l.sw    0x74(r0),r6
#define EXCEPTION_LOAD_GPR6		l.lwz   r6,0x74(r0)

/*
 * EXCEPTION_HANDLE temporary stores (offsets 0x78-0x80)
 */
#define EXCEPTION_T_STORE_GPR30		l.sw    0x78(r0),r30
#define EXCEPTION_T_LOAD_GPR30(reg)	l.lwz   reg,0x78(r0)

#define EXCEPTION_T_STORE_GPR10		l.sw    0x7c(r0),r10
#define EXCEPTION_T_LOAD_GPR10(reg)	l.lwz   reg,0x7c(r0)

#define EXCEPTION_T_STORE_SP		l.sw    0x80(r0),r1
#define EXCEPTION_T_LOAD_SP(reg)	l.lwz   reg,0x80(r0)

/*
 * For UNHANDLED_EXCEPTION (offset 0x84)
 */
#define EXCEPTION_T_STORE_GPR31		l.sw    0x84(r0),r31
#define EXCEPTION_T_LOAD_GPR31(reg)	l.lwz   reg,0x84(r0)
/* =========================================================[ macros ]=== */

/*
 * reg = *(phys_addr_of(&current_pgd)) -- fetch the current PGD pointer
 * through its physical address so it works with the MMU off.
 * t1 is clobbered as a temporary.
 */
#define GET_CURRENT_PGD(reg,t1)			\
	LOAD_SYMBOL_2_GPR(reg,current_pgd)	;\
	tophys  (t1,reg)			;\
	l.lwz   reg,0(t1)
/*
 * DSCR: this is a common hook for handling exceptions.  It will save
 *       the needed registers, set up the stack and the pointer to
 *       current, then jump to the handler while enabling the MMU.
 *
 * PRMS: handler - a function to jump to.  It has to save the
 *       remaining registers to the kernel stack, call the
 *       appropriate arch-independent exception handler and
 *       finally jump to ret_from_except
 *
 * PREQ: unchanged state from the time the exception happened
 *
 * POST: SAVED the following registers' original values
 *       to the newly created exception frame pointed to by r1
 *
 *       r1  - ksp      pointing to the new (exception) frame
 *       r4  - EEAR     exception EA
 *       r10 - current  pointing to current_thread_info struct
 *       r12 - syscall  0, since we didn't come from syscall
 *       r13 - temp     it actually contains new SR, not needed anymore
 *       r31 - handler  address of the handler we'll jump to
 *
 *       handler has to save remaining registers to the exception
 *       ksp frame *before* tainting them!
 *
 * NOTE: this function is not reentrant per se.  Reentrancy is
 *       guaranteed by the processor disabling all exceptions/interrupts
 *       when an exception occurs.
 *
 * OPTM: no need to make it so wasteful to extract ksp when in user mode
 */
#define EXCEPTION_HANDLE(handler)				\
	EXCEPTION_T_STORE_GPR30					;\
	l.mfspr r30,r0,SPR_ESR_BASE				;\
	l.andi  r30,r30,SPR_SR_SM				;\
	l.sfeqi r30,0		/* flag set => came from user mode */ ;\
	EXCEPTION_T_STORE_GPR10					;\
	l.bnf   2f		/* kernel_mode */		;\
	EXCEPTION_T_STORE_SP	/* delay slot */		;\
1: /* user_mode: fetch the kernel stack from thread_info */	;\
	LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)		;\
	tophys  (r30,r1)					;\
	/* r10: current_thread_info  */				;\
	l.lwz   r10,0(r30)					;\
	tophys  (r30,r10)					;\
	l.lwz   r1,(TI_KSP)(r30)				;\
	/* fall through */					;\
2: /* kernel_mode: */						;\
	/* create new stack frame, save only needed gprs */	;\
	/* r1: KSP, r10: current, r4: EEAR, r31: __pa(KSP) */	;\
	/* r12:	temp, syscall indicator */			;\
	l.addi  r1,r1,-(INT_FRAME_SIZE)				;\
	/* r1 is KSP, r30 is __pa(KSP) */			;\
	tophys  (r30,r1)					;\
	l.sw    PT_GPR12(r30),r12				;\
	l.mfspr r12,r0,SPR_EPCR_BASE				;\
	l.sw    PT_PC(r30),r12					;\
	l.mfspr r12,r0,SPR_ESR_BASE				;\
	l.sw    PT_SR(r30),r12					;\
	/* save r30 as it was prior to exception */		;\
	EXCEPTION_T_LOAD_GPR30(r12)				;\
	l.sw    PT_GPR30(r30),r12				;\
	/* save r10 as it was prior to exception */		;\
	EXCEPTION_T_LOAD_GPR10(r12)				;\
	l.sw    PT_GPR10(r30),r12				;\
	/* save PT_SP as it was prior to exception */		;\
	EXCEPTION_T_LOAD_SP(r12)				;\
	l.sw    PT_SP(r30),r12					;\
	/* save exception r4, set r4 = EA */			;\
	l.sw    PT_GPR4(r30),r4					;\
	l.mfspr r4,r0,SPR_EEAR_BASE				;\
	/* r12 == 1 if we come from syscall */			;\
	CLEAR_GPR(r12)						;\
	/* ----- turn on MMU: rfe loads ESR into SR ----- */	;\
	l.ori   r30,r0,(EXCEPTION_SR)				;\
	l.mtspr r0,r30,SPR_ESR_BASE				;\
	/* r30:	EA address of handler */			;\
	LOAD_SYMBOL_2_GPR(r30,handler)				;\
	l.mtspr r0,r30,SPR_EPCR_BASE				;\
	l.rfe
/*
 * this doesn't work
 *
 *
 * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
 * #define UNHANDLED_EXCEPTION(handler)				\
 *	l.ori   r3,r0,0x1					;\
 *	l.mtspr r0,r3,SPR_SR					;\
 *      l.movhi r3,hi(0xf0000100)				;\
 *      l.ori   r3,r3,lo(0xf0000100)				;\
 *	l.jr    r3						;\
 *	l.nop   1
 *
 * #endif
 */

/* DSCR: this is the same as EXCEPTION_HANDLE(), we are just
 *       a bit more careful (in case of a PT_SP or current pointer
 *       corruption): it prints diagnostics via the emergency console
 *       and sets up a known-good stack from _unhandled_stack_top.
 */
#define UNHANDLED_EXCEPTION(handler)				\
	EXCEPTION_T_STORE_GPR31					;\
	EXCEPTION_T_STORE_GPR10					;\
	EXCEPTION_T_STORE_SP					;\
	/* temporarily stash r3, r9 in r1, r10 */		;\
	l.addi  r1,r3,0x0					;\
	l.addi  r10,r9,0x0					;\
	/* the string referenced by r3 must be low enough */	;\
	l.jal   _emergency_print				;\
	l.ori   r3,r0,lo(_string_unhandled_exception)		;\
	l.mfspr r3,r0,SPR_NPC					;\
	l.jal   _emergency_print_nr				;\
	l.andi  r3,r3,0x1f00					;\
	/* the string referenced by r3 must be low enough */	;\
	l.jal   _emergency_print				;\
	l.ori   r3,r0,lo(_string_epc_prefix)			;\
	l.jal   _emergency_print_nr				;\
	l.mfspr r3,r0,SPR_EPCR_BASE				;\
	l.jal   _emergency_print				;\
	l.ori   r3,r0,lo(_string_nl)				;\
	/* end of printing */					;\
	l.addi  r3,r1,0x0					;\
	l.addi  r9,r10,0x0					;\
	/* extract current, ksp from current_set */		;\
	LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top)		;\
	LOAD_SYMBOL_2_GPR(r10,init_thread_union)		;\
	/* create new stack frame, save only needed gprs */	;\
	/* r1: KSP, r10: current, r31: __pa(KSP) */		;\
	/* r12:	temp, syscall indicator, r13 temp */		;\
	l.addi  r1,r1,-(INT_FRAME_SIZE)				;\
	/* r1 is KSP, r31 is __pa(KSP) */			;\
	tophys  (r31,r1)					;\
	l.sw    PT_GPR12(r31),r12				;\
	l.mfspr r12,r0,SPR_EPCR_BASE				;\
	l.sw    PT_PC(r31),r12					;\
	l.mfspr r12,r0,SPR_ESR_BASE				;\
	l.sw    PT_SR(r31),r12					;\
	/* save r31 as it was prior to exception */		;\
	EXCEPTION_T_LOAD_GPR31(r12)				;\
	l.sw    PT_GPR31(r31),r12				;\
	/* save r10 as it was prior to exception */		;\
	EXCEPTION_T_LOAD_GPR10(r12)				;\
	l.sw    PT_GPR10(r31),r12				;\
	/* save PT_SP as it was prior to exception */		;\
	EXCEPTION_T_LOAD_SP(r12)				;\
	l.sw    PT_SP(r31),r12					;\
	l.sw    PT_GPR13(r31),r13				;\
	/* --> */						;\
	/* save exception r4, set r4 = EA */			;\
	l.sw    PT_GPR4(r31),r4					;\
	l.mfspr r4,r0,SPR_EEAR_BASE				;\
	/* r12 == 1 if we come from syscall */			;\
	CLEAR_GPR(r12)						;\
	/* ----- play a MMU trick: rfe loads ESR into SR ----- */ ;\
	l.ori   r31,r0,(EXCEPTION_SR)				;\
	l.mtspr r0,r31,SPR_ESR_BASE				;\
	/* r31:	EA address of handler */			;\
	LOAD_SYMBOL_2_GPR(r31,handler)				;\
	l.mtspr r0,r31,SPR_EPCR_BASE				;\
	l.rfe
/* =====================================================[ exceptions] === */

/* The OpenRISC exception vector table: one fixed-address slot every
 * 0x100 bytes starting at 0x100.  Each slot either dispatches through
 * EXCEPTION_HANDLE/UNHANDLED_EXCEPTION or jumps to a boot handler. */

/* ---[ 0x100: RESET exception ]----------------------------------------- */
	.org 0x100
	/* Jump to .init code at _start which lives in the .head section
	 * and will be discarded after boot.
	 */
	LOAD_SYMBOL_2_GPR(r15, _start)
	tophys	(r13,r15)			/* MMU disabled */
	l.jr	r13
	 l.nop

/* ---[ 0x200: BUS exception ]------------------------------------------- */
	.org 0x200
_dispatch_bus_fault:
	EXCEPTION_HANDLE(_bus_fault_handler)

/* ---[ 0x300: Data Page Fault exception ]------------------------------- */
	.org 0x300
_dispatch_do_dpage_fault:
//	totally disable timer interrupt
//	l.mtspr	r0,r0,SPR_TTMR
//	DEBUG_TLB_PROBE(0x300)
//	EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300)
	EXCEPTION_HANDLE(_data_page_fault_handler)

/* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
	.org 0x400
_dispatch_do_ipage_fault:
//	totally disable timer interrupt
//	l.mtspr	r0,r0,SPR_TTMR
//	DEBUG_TLB_PROBE(0x400)
//	EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400)
	EXCEPTION_HANDLE(_insn_page_fault_handler)

/* ---[ 0x500: Timer exception ]----------------------------------------- */
	.org 0x500
	EXCEPTION_HANDLE(_timer_handler)

/* ---[ 0x600: Alignment exception ]-------------------------------------- */
	.org 0x600
	EXCEPTION_HANDLE(_alignment_handler)

/* ---[ 0x700: Illegal insn exception ]---------------------------------- */
	.org 0x700
	EXCEPTION_HANDLE(_illegal_instruction_handler)

/* ---[ 0x800: External interrupt exception ]---------------------------- */
	.org 0x800
	EXCEPTION_HANDLE(_external_irq_handler)

/* ---[ 0x900: DTLB miss exception ]------------------------------------- */
	.org 0x900
	l.j	boot_dtlb_miss_handler
	 l.nop

/* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
	.org 0xa00
	l.j	boot_itlb_miss_handler
	 l.nop

/* ---[ 0xb00: Range exception ]----------------------------------------- */
	.org 0xb00
	UNHANDLED_EXCEPTION(_vector_0xb00)

/* ---[ 0xc00: Syscall exception ]--------------------------------------- */
	.org 0xc00
	EXCEPTION_HANDLE(_sys_call_handler)

/* ---[ 0xd00: Trap exception ]------------------------------------------ */
	.org 0xd00
	UNHANDLED_EXCEPTION(_vector_0xd00)

/* ---[ 0xe00: Trap exception ]------------------------------------------ */
	.org 0xe00
//	UNHANDLED_EXCEPTION(_vector_0xe00)
	EXCEPTION_HANDLE(_trap_handler)

/* ---[ 0xf00: Reserved exception ]-------------------------------------- */
	.org 0xf00
	UNHANDLED_EXCEPTION(_vector_0xf00)

/* ---[ 0x1000: Reserved exception ]------------------------------------- */
	.org 0x1000
	UNHANDLED_EXCEPTION(_vector_0x1000)

/* ---[ 0x1100: Reserved exception ]------------------------------------- */
	.org 0x1100
	UNHANDLED_EXCEPTION(_vector_0x1100)

/* ---[ 0x1200: Reserved exception ]------------------------------------- */
	.org 0x1200
	UNHANDLED_EXCEPTION(_vector_0x1200)

/* ---[ 0x1300: Reserved exception ]------------------------------------- */
	.org 0x1300
	UNHANDLED_EXCEPTION(_vector_0x1300)

/* ---[ 0x1400: Reserved exception ]------------------------------------- */
	.org 0x1400
	UNHANDLED_EXCEPTION(_vector_0x1400)

/* ---[ 0x1500: Reserved exception ]------------------------------------- */
	.org 0x1500
	UNHANDLED_EXCEPTION(_vector_0x1500)

/* ---[ 0x1600: Reserved exception ]------------------------------------- */
	.org 0x1600
	UNHANDLED_EXCEPTION(_vector_0x1600)

/* ---[ 0x1700: Reserved exception ]------------------------------------- */
	.org 0x1700
	UNHANDLED_EXCEPTION(_vector_0x1700)

/* ---[ 0x1800: Reserved exception ]------------------------------------- */
	.org 0x1800
	UNHANDLED_EXCEPTION(_vector_0x1800)

/* ---[ 0x1900: Reserved exception ]------------------------------------- */
	.org 0x1900
	UNHANDLED_EXCEPTION(_vector_0x1900)

/* ---[ 0x1a00: Reserved exception ]------------------------------------- */
	.org 0x1a00
	UNHANDLED_EXCEPTION(_vector_0x1a00)

/* ---[ 0x1b00: Reserved exception ]------------------------------------- */
	.org 0x1b00
	UNHANDLED_EXCEPTION(_vector_0x1b00)

/* ---[ 0x1c00: Reserved exception ]------------------------------------- */
	.org 0x1c00
	UNHANDLED_EXCEPTION(_vector_0x1c00)

/* ---[ 0x1d00: Reserved exception ]------------------------------------- */
	.org 0x1d00
	UNHANDLED_EXCEPTION(_vector_0x1d00)

/* ---[ 0x1e00: Reserved exception ]------------------------------------- */
	.org 0x1e00
	UNHANDLED_EXCEPTION(_vector_0x1e00)

/* ---[ 0x1f00: Reserved exception ]------------------------------------- */
	.org 0x1f00
	UNHANDLED_EXCEPTION(_vector_0x1f00)

	.org 0x2000
/* ===================================================[ kernel start ]=== */

/*    .text*/

/* This early stuff belongs in HEAD, but some of the functions below definitely
 * don't... */

	__HEAD
	.global _start
_start:
	/* save kernel parameters */
	l.or	r25,r0,r3	/* pointer to fdt */

	/*
	 * ensure a deterministic start: supervisor mode only in SR,
	 * all GPRs (except r25 holding the fdt pointer) zeroed
	 */
	l.ori	r3,r0,0x1
	l.mtspr	r0,r3,SPR_SR

	CLEAR_GPR(r1)
	CLEAR_GPR(r2)
	CLEAR_GPR(r3)
	CLEAR_GPR(r4)
	CLEAR_GPR(r5)
	CLEAR_GPR(r6)
	CLEAR_GPR(r7)
	CLEAR_GPR(r8)
	CLEAR_GPR(r9)
	CLEAR_GPR(r10)
	CLEAR_GPR(r11)
	CLEAR_GPR(r12)
	CLEAR_GPR(r13)
	CLEAR_GPR(r14)
	CLEAR_GPR(r15)
	CLEAR_GPR(r16)
	CLEAR_GPR(r17)
	CLEAR_GPR(r18)
	CLEAR_GPR(r19)
	CLEAR_GPR(r20)
	CLEAR_GPR(r21)
	CLEAR_GPR(r22)
	CLEAR_GPR(r23)
	CLEAR_GPR(r24)
	CLEAR_GPR(r26)
	CLEAR_GPR(r27)
	CLEAR_GPR(r28)
	CLEAR_GPR(r29)
	CLEAR_GPR(r30)
	CLEAR_GPR(r31)

	/*
	 * set up initial ksp and current
	 */
	LOAD_SYMBOL_2_GPR(r1,init_thread_union+0x2000)	// setup kernel stack
	LOAD_SYMBOL_2_GPR(r10,init_thread_union)	// setup current
	tophys	(r31,r10)
	l.sw	TI_KSP(r31), r1

	l.ori	r4,r0,0x0

	/*
	 * .data contains initialized data,
	 * .bss contains uninitialized data - clear it up
	 */
clear_bss:
	LOAD_SYMBOL_2_GPR(r24, __bss_start)
	LOAD_SYMBOL_2_GPR(r26, _end)
	tophys(r28,r24)
	tophys(r30,r26)
	CLEAR_GPR(r24)
	CLEAR_GPR(r26)
1:
	/* NOTE(review): the store executes before the bounds test, so the
	 * word at _end itself is also zeroed -- confirm the extra word is
	 * harmless/intentional. */
	l.sw    (0)(r28),r0
	l.sfltu r28,r30
	l.bf    1b
	 l.addi  r28,r28,4

enable_ic:
	l.jal	_ic_enable
	 l.nop

enable_dc:
	l.jal	_dc_enable
	 l.nop

flush_tlb:
	/*
	 *  I N V A L I D A T E   T L B   e n t r i e s
	 */
	LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0))
	LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
	l.addi	r7,r0,128 /* Maximum number of sets */
1:
	/* NOTE(review): the counter runs 128 down to 0, so the loop body
	 * executes 129 times (one entry beyond the 128 sets) -- verify. */
	l.mtspr	r5,r0,0x0
	l.mtspr	r6,r0,0x0

	l.addi	r5,r5,1
	l.addi	r6,r6,1
	l.sfeq	r7,r0
	l.bnf	1b
	 l.addi	r7,r7,-1

/* The MMU needs to be enabled before or32_early_setup is called */

enable_mmu:
	/*
	 * enable dmmu & immu: OR the DME and IME bits into SR
	 * (original comment claimed the bits are "set to 0", which
	 * contradicted the code below)
	 */
	l.mfspr	r30,r0,SPR_SR
	l.movhi	r28,hi(SPR_SR_DME | SPR_SR_IME)
	l.ori	r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
	l.or	r30,r30,r28
	l.mtspr	r0,r30,SPR_SR
	/* pipeline flush padding after the MMU enable takes effect */
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop

	// reset the simulation counters
	l.nop 5

	/* check fdt header magic word */
	l.lwz	r3,0(r25)	/* load magic from fdt into r3 */
	l.movhi	r4,hi(OF_DT_HEADER)
	l.ori	r4,r4,lo(OF_DT_HEADER)
	l.sfeq	r3,r4
	l.bf	_fdt_found
	 l.nop
	/* magic number mismatch, set fdt pointer to null */
	l.or	r25,r0,r0
_fdt_found:
	/* pass fdt pointer to or32_early_setup in r3 */
	l.or	r3,r0,r25
	LOAD_SYMBOL_2_GPR(r24, or32_early_setup)
	l.jalr	r24
	 l.nop

clear_regs:
	/*
	 * clear all GPRS to increase determinism
	 */
	CLEAR_GPR(r2)
	CLEAR_GPR(r3)
	CLEAR_GPR(r4)
	CLEAR_GPR(r5)
	CLEAR_GPR(r6)
	CLEAR_GPR(r7)
	CLEAR_GPR(r8)
	CLEAR_GPR(r9)
	CLEAR_GPR(r11)
	CLEAR_GPR(r12)
	CLEAR_GPR(r13)
	CLEAR_GPR(r14)
	CLEAR_GPR(r15)
	CLEAR_GPR(r16)
	CLEAR_GPR(r17)
	CLEAR_GPR(r18)
	CLEAR_GPR(r19)
	CLEAR_GPR(r20)
	CLEAR_GPR(r21)
	CLEAR_GPR(r22)
	CLEAR_GPR(r23)
	CLEAR_GPR(r24)
	CLEAR_GPR(r25)
	CLEAR_GPR(r26)
	CLEAR_GPR(r27)
	CLEAR_GPR(r28)
	CLEAR_GPR(r29)
	CLEAR_GPR(r30)
	CLEAR_GPR(r31)

jump_start_kernel:
	/*
	 * jump to kernel entry (start_kernel)
	 */
	LOAD_SYMBOL_2_GPR(r30, start_kernel)
	l.jr	r30
	 l.nop
/* ========================================[ cache ]=== */

	/* alignment here so we don't change memory offsets with
	 * memory controller defined
	 */
	.align 0x2000

/*
 * _ic_enable: probe, invalidate and enable the instruction cache.
 * Called via l.jal; returns through r9.
 * Clobbers r5, r6, r14, r16, r24, r26, r28, r30 and flags.
 */
_ic_enable:
	/* Check if IC present and skip enabling otherwise */
	l.mfspr r24,r0,SPR_UPR
	l.andi  r26,r24,SPR_UPR_ICP
	l.sfeq  r26,r0
	l.bf	9f
	 l.nop

	/* Disable IC (clear the ICE bit in SR) */
	l.mfspr r6,r0,SPR_SR
	l.addi  r5,r0,-1
	l.xori  r5,r5,SPR_SR_ICE
	l.and   r5,r6,r5
	l.mtspr r0,r5,SPR_SR

	/* Establish cache block size
	   If BS=0, 16;
	   If BS=1, 32;
	   r14 contains block size
	*/
	l.mfspr r24,r0,SPR_ICCFGR
	l.andi	r26,r24,SPR_ICCFGR_CBS
	l.srli	r28,r26,7
	l.ori	r30,r0,16
	l.sll	r14,r30,r28

	/* Establish number of cache sets
	   r16 contains number of cache sets
	   r28 contains log(# of cache sets)
	*/
	l.andi  r26,r24,SPR_ICCFGR_NCS
	l.srli 	r28,r26,3
	l.ori   r30,r0,1
	l.sll   r16,r30,r28

	/* Invalidate IC: walk addresses 0..cache_size step block_size,
	 * writing each to the IC block-invalidate register */
	l.addi  r6,r0,0
	l.sll   r5,r14,r28
//        l.mul   r5,r14,r16
//	l.trap  1
//        l.addi  r5,r0,IC_SIZE
1:
	l.mtspr r0,r6,SPR_ICBIR
	l.sfne  r6,r5
	l.bf    1b
	 l.add    r6,r6,r14
//        l.addi   r6,r6,IC_LINE

	/* Enable IC */
	l.mfspr r6,r0,SPR_SR
	l.ori   r6,r6,SPR_SR_ICE
	l.mtspr r0,r6,SPR_SR
	/* pipeline padding while the IC enable takes effect */
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
9:
	l.jr    r9
	 l.nop
/*
 * _dc_enable: probe, invalidate and enable the data cache.
 * Same structure as _ic_enable but for the DC; called via l.jal and
 * returns through r9.  Clobbers r5, r6, r14, r16, r24, r26, r28, r30.
 */
_dc_enable:
	/* Check if DC present and skip enabling otherwise */
	l.mfspr r24,r0,SPR_UPR
	l.andi  r26,r24,SPR_UPR_DCP
	l.sfeq  r26,r0
	l.bf	9f
	 l.nop

	/* Disable DC (clear the DCE bit in SR) */
	l.mfspr r6,r0,SPR_SR
	l.addi  r5,r0,-1
	l.xori  r5,r5,SPR_SR_DCE
	l.and   r5,r6,r5
	l.mtspr r0,r5,SPR_SR

	/* Establish cache block size
	   If BS=0, 16;
	   If BS=1, 32;
	   r14 contains block size
	*/
	l.mfspr r24,r0,SPR_DCCFGR
	l.andi	r26,r24,SPR_DCCFGR_CBS
	l.srli	r28,r26,7
	l.ori	r30,r0,16
	l.sll	r14,r30,r28

	/* Establish number of cache sets
	   r16 contains number of cache sets
	   r28 contains log(# of cache sets)
	*/
	l.andi  r26,r24,SPR_DCCFGR_NCS
	l.srli 	r28,r26,3
	l.ori   r30,r0,1
	l.sll   r16,r30,r28

	/* Invalidate DC: walk addresses 0..cache_size step block_size */
	l.addi  r6,r0,0
	l.sll   r5,r14,r28
1:
	l.mtspr r0,r6,SPR_DCBIR
	l.sfne  r6,r5
	l.bf    1b
	 l.add    r6,r6,r14

	/* Enable DC */
	l.mfspr r6,r0,SPR_SR
	l.ori   r6,r6,SPR_SR_DCE
	l.mtspr r0,r6,SPR_SR
9:
	l.jr    r9
	 l.nop
/* ===============================================[ page table masks ]=== */

#define DTLB_UP_CONVERT_MASK  0x3fa
#define ITLB_UP_CONVERT_MASK  0x3a

/* for SMP we'd have (this is a bit subtle, CC must be always set
 * for SMP, but since we have _PAGE_PRESENT bit always defined
 * we can just modify the mask)
 */
#define DTLB_SMP_CONVERT_MASK  0x3fb
#define ITLB_SMP_CONVERT_MASK  0x3b

/* ---[ boot dtlb miss handler ]----------------------------------------- */

/*
 * Boot-time DTLB miss handler: installs a 1:1 (or tophys-translated)
 * mapping for the faulting EA directly into the DTLB, so early code
 * can run before the real page-table-walking handlers take over.
 */
boot_dtlb_miss_handler:

/* mask for DTLB_MR register: - (0) sets V (valid) bit,
 *                            - (31-12) sets bits belonging to VPN (31-12)
 */
#define DTLB_MR_MASK 0xfffff001

/* mask for DTLB_TR register: - (2) sets CI (cache inhibit) bit,
 *			      - (4) sets A (access) bit,
 *                            - (5) sets D (dirty) bit,
 *                            - (8) sets SRE (superuser read) bit
 *                            - (9) sets SWE (superuser write) bit
 *                            - (31-12) sets bits belonging to VPN (31-12)
 */
#define DTLB_TR_MASK 0xfffff332

/* These are for masking out the VPN/PPN value from the MR/TR registers...
 * it's not the same as the PFN */
#define VPN_MASK 0xfffff000
#define PPN_MASK 0xfffff000


	EXCEPTION_STORE_GPR6

#if 0
	l.mfspr r6,r0,SPR_ESR_BASE	   //
	l.andi  r6,r6,SPR_SR_SM            // are we in kernel mode ?
	l.sfeqi r6,0                       // r6 == 0x1 --> SM
	l.bf    exit_with_no_dtranslation  //
	 l.nop
#endif

	/* this could be optimized by moving storing of
	 * non r6 registers here, and jumping r6 restore
	 * if not in supervisor mode
	 */

	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	EXCEPTION_STORE_GPR5

	l.mfspr r4,r0,SPR_EEAR_BASE        // get the offending EA

immediate_translation:
	CLEAR_GPR(r6)

	l.srli	r3,r4,0xd                  // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)

	/* compute the DMMU set-index mask from the configured set count */
	l.mfspr r6, r0, SPR_DMMUCFGR
	l.andi	r6, r6, SPR_DMMUCFGR_NTS
	l.srli	r6, r6, SPR_DMMUCFGR_NTS_OFF
	l.ori	r5, r0, 0x1
	l.sll	r5, r5, r6 	           // r5 = number DMMU sets
	l.addi	r6, r5, -1  	           // r6 = nsets mask
	l.and	r2, r3, r6	           // r2 <- r3 % NSETS_MASK

	l.or    r6,r6,r4                   // r6 <- r4 (low set-mask bits also set; cleared-up by the next ori anyway)
	l.ori   r6,r6,~(VPN_MASK)          // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
	l.movhi r5,hi(DTLB_MR_MASK)        // r5 <- ffff:0000.x000
	l.ori   r5,r5,lo(DTLB_MR_MASK)     // r5 <- ffff:1111.x001 - apply DTLB_MR_MASK
	l.and   r5,r5,r6                   // r5 <- VPN :VPN .x001 - we have DTLBMR entry
	l.mtspr r2,r5,SPR_DTLBMR_BASE(0)   // set DTLBMR

	/* set up DTLB with no translation for EA <= 0xbfffffff */
	LOAD_SYMBOL_2_GPR(r6,0xbfffffff)
	l.sfgeu  r6,r4                     // flag if r6 >= r4 (if 0xbfffffff >= EA)
	l.bf     1f                        // goto out
	 l.and    r3,r4,r4                 // delay slot :: r3 <- r4 (if flag==1)

	tophys(r3,r4)                      // r3 <- PA
1:
	l.ori   r3,r3,~(PPN_MASK)          // r3 <- PPN :PPN .xfff - clear up lo(r3) to 0x**** *fff
	l.movhi r5,hi(DTLB_TR_MASK)        // r5 <- ffff:0000.x000
	l.ori   r5,r5,lo(DTLB_TR_MASK)     // r5 <- ffff:1111.x330 - apply DTLB_TR_MASK
	l.and   r5,r5,r3                   // r5 <- PPN :PPN .x330 - we have DTLBTR entry
	l.mtspr r2,r5,SPR_DTLBTR_BASE(0)   // set DTLBTR

	EXCEPTION_LOAD_GPR6
	EXCEPTION_LOAD_GPR5
	EXCEPTION_LOAD_GPR4
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR2

	l.rfe                              // SR <- ESR, PC <- EPC

exit_with_no_dtranslation:
	/* EA out of memory or not in supervisor mode */
	EXCEPTION_LOAD_GPR6
	EXCEPTION_LOAD_GPR4
	/* NOTE(review): no explicit delay-slot instruction follows this
	 * jump; the next instruction in memory fills the slot -- confirm
	 * this is intentional. */
	l.j	_dispatch_bus_fault
  733. /* ---[ boot itlb miss handler ]----------------------------------------- */
  734. boot_itlb_miss_handler:
  735. /* mask for ITLB_MR register: - sets V (valid) bit,
  736. * - sets bits belonging to VPN (15-12)
  737. */
  738. #define ITLB_MR_MASK 0xfffff001
  739. /* mask for ITLB_TR register: - sets A (access) bit,
  740. * - sets SXE (superuser execute) bit
  741. * - sets bits belonging to VPN (15-12)
  742. */
  743. #define ITLB_TR_MASK 0xfffff050
  744. /*
  745. #define VPN_MASK 0xffffe000
  746. #define PPN_MASK 0xffffe000
  747. */
  748. EXCEPTION_STORE_GPR2
  749. EXCEPTION_STORE_GPR3
  750. EXCEPTION_STORE_GPR4
  751. EXCEPTION_STORE_GPR5
  752. EXCEPTION_STORE_GPR6
  753. #if 0
  754. l.mfspr r6,r0,SPR_ESR_BASE //
  755. l.andi r6,r6,SPR_SR_SM // are we in kernel mode ?
  756. l.sfeqi r6,0 // r6 == 0x1 --> SM
  757. l.bf exit_with_no_itranslation
  758. l.nop
  759. #endif
  760. l.mfspr r4,r0,SPR_EEAR_BASE // get the offending EA
  761. earlyearly:
  762. CLEAR_GPR(r6)
  763. l.srli r3,r4,0xd // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)
  764. l.mfspr r6, r0, SPR_IMMUCFGR
  765. l.andi r6, r6, SPR_IMMUCFGR_NTS
  766. l.srli r6, r6, SPR_IMMUCFGR_NTS_OFF
  767. l.ori r5, r0, 0x1
  768. l.sll r5, r5, r6 // r5 = number IMMU sets from IMMUCFGR
  769. l.addi r6, r5, -1 // r6 = nsets mask
  770. l.and r2, r3, r6 // r2 <- r3 % NSETS_MASK
  771. l.or r6,r6,r4 // r6 <- r4
  772. l.ori r6,r6,~(VPN_MASK) // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
  773. l.movhi r5,hi(ITLB_MR_MASK) // r5 <- ffff:0000.x000
  774. l.ori r5,r5,lo(ITLB_MR_MASK) // r5 <- ffff:1111.x001 - apply ITLB_MR_MASK
  775. l.and r5,r5,r6 // r5 <- VPN :VPN .x001 - we have ITLBMR entry
  776. l.mtspr r2,r5,SPR_ITLBMR_BASE(0) // set ITLBMR
  777. /*
  778. * set up ITLB with no translation for EA <= 0x0fffffff
  779. *
  780. * we need this for head.S mapping (EA = PA). if we move all functions
  781. * which run with mmu enabled into entry.S, we might be able to eliminate this.
  782. *
  783. */
  784. LOAD_SYMBOL_2_GPR(r6,0x0fffffff)
  785. l.sfgeu r6,r4 // flag if r6 >= r4 (if 0xb0ffffff >= EA)
  786. l.bf 1f // goto out
  787. l.and r3,r4,r4 // delay slot :: 24 <- r4 (if flag==1)
  788. tophys(r3,r4) // r3 <- PA
  789. 1:
  790. l.ori r3,r3,~(PPN_MASK) // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
  791. l.movhi r5,hi(ITLB_TR_MASK) // r5 <- ffff:0000.x000
  792. l.ori r5,r5,lo(ITLB_TR_MASK) // r5 <- ffff:1111.x050 - apply ITLB_MR_MASK
  793. l.and r5,r5,r3 // r5 <- PPN :PPN .x050 - we have ITLBTR entry
  794. l.mtspr r2,r5,SPR_ITLBTR_BASE(0) // set ITLBTR
  795. EXCEPTION_LOAD_GPR6
  796. EXCEPTION_LOAD_GPR5
  797. EXCEPTION_LOAD_GPR4
  798. EXCEPTION_LOAD_GPR3
  799. EXCEPTION_LOAD_GPR2
  800. l.rfe // SR <- ESR, PC <- EPC
  801. exit_with_no_itranslation:
  802. EXCEPTION_LOAD_GPR4
  803. EXCEPTION_LOAD_GPR6
  804. l.j _dispatch_bus_fault
  805. l.nop
  806. /* ====================================================================== */
  807. /*
  808. * Stuff below here shouldn't go into .head section... maybe this stuff
  809. * can be moved to entry.S ???
  810. */
  811. /* ==============================================[ DTLB miss handler ]=== */
  812. /*
  813. * Comments:
  814. * Exception handlers are entered with MMU off so the following handler
  815. * needs to use physical addressing
  816. *
  817. */
  818. .text
  819. ENTRY(dtlb_miss_handler)
  820. EXCEPTION_STORE_GPR2
  821. EXCEPTION_STORE_GPR3
  822. EXCEPTION_STORE_GPR4
  823. EXCEPTION_STORE_GPR5
  824. EXCEPTION_STORE_GPR6
  825. /*
  826. * get EA of the miss
  827. */
  828. l.mfspr r2,r0,SPR_EEAR_BASE
  829. /*
  830. * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
  831. */
  832. GET_CURRENT_PGD(r3,r5) // r3 is current_pgd, r5 is temp
  833. l.srli r4,r2,0x18 // >> PAGE_SHIFT + (PAGE_SHIFT - 2)
  834. l.slli r4,r4,0x2 // to get address << 2
  835. l.add r5,r4,r3 // r4 is pgd_index(daddr)
  836. /*
  837. * if (pmd_none(*pmd))
  838. * goto pmd_none:
  839. */
  840. tophys (r4,r5)
  841. l.lwz r3,0x0(r4) // get *pmd value
  842. l.sfne r3,r0
  843. l.bnf d_pmd_none
  844. l.andi r3,r3,~PAGE_MASK //0x1fff // ~PAGE_MASK
  845. /*
  846. * if (pmd_bad(*pmd))
  847. * pmd_clear(pmd)
  848. * goto pmd_bad:
  849. */
  850. // l.sfeq r3,r0 // check *pmd value
  851. // l.bf d_pmd_good
  852. l.addi r3,r0,0xffffe000 // PAGE_MASK
  853. // l.j d_pmd_bad
  854. // l.sw 0x0(r4),r0 // clear pmd
  855. d_pmd_good:
  856. /*
  857. * pte = *pte_offset(pmd, daddr);
  858. */
  859. l.lwz r4,0x0(r4) // get **pmd value
  860. l.and r4,r4,r3 // & PAGE_MASK
  861. l.srli r5,r2,0xd // >> PAGE_SHIFT, r2 == EEAR
  862. l.andi r3,r5,0x7ff // (1UL << PAGE_SHIFT - 2) - 1
  863. l.slli r3,r3,0x2 // to get address << 2
  864. l.add r3,r3,r4
  865. l.lwz r2,0x0(r3) // this is pte at last
  866. /*
  867. * if (!pte_present(pte))
  868. */
  869. l.andi r4,r2,0x1
  870. l.sfne r4,r0 // is pte present
  871. l.bnf d_pte_not_present
  872. l.addi r3,r0,0xffffe3fa // PAGE_MASK | DTLB_UP_CONVERT_MASK
  873. /*
  874. * fill DTLB TR register
  875. */
  876. l.and r4,r2,r3 // apply the mask
  877. // Determine number of DMMU sets
  878. l.mfspr r6, r0, SPR_DMMUCFGR
  879. l.andi r6, r6, SPR_DMMUCFGR_NTS
  880. l.srli r6, r6, SPR_DMMUCFGR_NTS_OFF
  881. l.ori r3, r0, 0x1
  882. l.sll r3, r3, r6 // r3 = number DMMU sets DMMUCFGR
  883. l.addi r6, r3, -1 // r6 = nsets mask
  884. l.and r5, r5, r6 // calc offset: & (NUM_TLB_ENTRIES-1)
  885. //NUM_TLB_ENTRIES
  886. l.mtspr r5,r4,SPR_DTLBTR_BASE(0)
  887. /*
  888. * fill DTLB MR register
  889. */
  890. l.mfspr r2,r0,SPR_EEAR_BASE
  891. l.addi r3,r0,0xffffe000 // PAGE_MASK
  892. l.and r4,r2,r3 // apply PAGE_MASK to EA (__PHX__ do we really need this?)
  893. l.ori r4,r4,0x1 // set hardware valid bit: DTBL_MR entry
  894. l.mtspr r5,r4,SPR_DTLBMR_BASE(0)
  895. EXCEPTION_LOAD_GPR2
  896. EXCEPTION_LOAD_GPR3
  897. EXCEPTION_LOAD_GPR4
  898. EXCEPTION_LOAD_GPR5
  899. EXCEPTION_LOAD_GPR6
  900. l.rfe
  901. d_pmd_bad:
  902. l.nop 1
  903. EXCEPTION_LOAD_GPR2
  904. EXCEPTION_LOAD_GPR3
  905. EXCEPTION_LOAD_GPR4
  906. EXCEPTION_LOAD_GPR5
  907. EXCEPTION_LOAD_GPR6
  908. l.rfe
  909. d_pmd_none:
  910. d_pte_not_present:
  911. EXCEPTION_LOAD_GPR2
  912. EXCEPTION_LOAD_GPR3
  913. EXCEPTION_LOAD_GPR4
  914. EXCEPTION_LOAD_GPR5
  915. EXCEPTION_LOAD_GPR6
  916. EXCEPTION_HANDLE(_dtlb_miss_page_fault_handler)
  917. /* ==============================================[ ITLB miss handler ]=== */
  918. ENTRY(itlb_miss_handler)
  919. EXCEPTION_STORE_GPR2
  920. EXCEPTION_STORE_GPR3
  921. EXCEPTION_STORE_GPR4
  922. EXCEPTION_STORE_GPR5
  923. EXCEPTION_STORE_GPR6
  924. /*
  925. * get EA of the miss
  926. */
  927. l.mfspr r2,r0,SPR_EEAR_BASE
  928. /*
  929. * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
  930. *
  931. */
  932. GET_CURRENT_PGD(r3,r5) // r3 is current_pgd, r5 is temp
  933. l.srli r4,r2,0x18 // >> PAGE_SHIFT + (PAGE_SHIFT - 2)
  934. l.slli r4,r4,0x2 // to get address << 2
  935. l.add r5,r4,r3 // r4 is pgd_index(daddr)
  936. /*
  937. * if (pmd_none(*pmd))
  938. * goto pmd_none:
  939. */
  940. tophys (r4,r5)
  941. l.lwz r3,0x0(r4) // get *pmd value
  942. l.sfne r3,r0
  943. l.bnf i_pmd_none
  944. l.andi r3,r3,0x1fff // ~PAGE_MASK
  945. /*
  946. * if (pmd_bad(*pmd))
  947. * pmd_clear(pmd)
  948. * goto pmd_bad:
  949. */
  950. // l.sfeq r3,r0 // check *pmd value
  951. // l.bf i_pmd_good
  952. l.addi r3,r0,0xffffe000 // PAGE_MASK
  953. // l.j i_pmd_bad
  954. // l.sw 0x0(r4),r0 // clear pmd
  955. i_pmd_good:
  956. /*
  957. * pte = *pte_offset(pmd, iaddr);
  958. *
  959. */
  960. l.lwz r4,0x0(r4) // get **pmd value
  961. l.and r4,r4,r3 // & PAGE_MASK
  962. l.srli r5,r2,0xd // >> PAGE_SHIFT, r2 == EEAR
  963. l.andi r3,r5,0x7ff // (1UL << PAGE_SHIFT - 2) - 1
  964. l.slli r3,r3,0x2 // to get address << 2
  965. l.add r3,r3,r4
  966. l.lwz r2,0x0(r3) // this is pte at last
  967. /*
  968. * if (!pte_present(pte))
  969. *
  970. */
  971. l.andi r4,r2,0x1
  972. l.sfne r4,r0 // is pte present
  973. l.bnf i_pte_not_present
  974. l.addi r3,r0,0xffffe03a // PAGE_MASK | ITLB_UP_CONVERT_MASK
  975. /*
  976. * fill ITLB TR register
  977. */
  978. l.and r4,r2,r3 // apply the mask
  979. l.andi r3,r2,0x7c0 // _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE | _PAGE_URE | _PAGE_UWE
  980. // l.andi r3,r2,0x400 // _PAGE_EXEC
  981. l.sfeq r3,r0
  982. l.bf itlb_tr_fill //_workaround
  983. // Determine number of IMMU sets
  984. l.mfspr r6, r0, SPR_IMMUCFGR
  985. l.andi r6, r6, SPR_IMMUCFGR_NTS
  986. l.srli r6, r6, SPR_IMMUCFGR_NTS_OFF
  987. l.ori r3, r0, 0x1
  988. l.sll r3, r3, r6 // r3 = number IMMU sets IMMUCFGR
  989. l.addi r6, r3, -1 // r6 = nsets mask
  990. l.and r5, r5, r6 // calc offset: & (NUM_TLB_ENTRIES-1)
  991. /*
  992. * __PHX__ :: fixme
  993. * we should not just blindly set executable flags,
  994. * but it does help with ping. the clean way would be to find out
  995. * (and fix it) why stack doesn't have execution permissions
  996. */
  997. itlb_tr_fill_workaround:
  998. l.ori r4,r4,0xc0 // | (SPR_ITLBTR_UXE | ITLBTR_SXE)
  999. itlb_tr_fill:
  1000. l.mtspr r5,r4,SPR_ITLBTR_BASE(0)
  1001. /*
  1002. * fill DTLB MR register
  1003. */
  1004. l.mfspr r2,r0,SPR_EEAR_BASE
  1005. l.addi r3,r0,0xffffe000 // PAGE_MASK
  1006. l.and r4,r2,r3 // apply PAGE_MASK to EA (__PHX__ do we really need this?)
  1007. l.ori r4,r4,0x1 // set hardware valid bit: DTBL_MR entry
  1008. l.mtspr r5,r4,SPR_ITLBMR_BASE(0)
  1009. EXCEPTION_LOAD_GPR2
  1010. EXCEPTION_LOAD_GPR3
  1011. EXCEPTION_LOAD_GPR4
  1012. EXCEPTION_LOAD_GPR5
  1013. EXCEPTION_LOAD_GPR6
  1014. l.rfe
  1015. i_pmd_bad:
  1016. l.nop 1
  1017. EXCEPTION_LOAD_GPR2
  1018. EXCEPTION_LOAD_GPR3
  1019. EXCEPTION_LOAD_GPR4
  1020. EXCEPTION_LOAD_GPR5
  1021. EXCEPTION_LOAD_GPR6
  1022. l.rfe
  1023. i_pmd_none:
  1024. i_pte_not_present:
  1025. EXCEPTION_LOAD_GPR2
  1026. EXCEPTION_LOAD_GPR3
  1027. EXCEPTION_LOAD_GPR4
  1028. EXCEPTION_LOAD_GPR5
  1029. EXCEPTION_LOAD_GPR6
  1030. EXCEPTION_HANDLE(_itlb_miss_page_fault_handler)
  1031. /* ==============================================[ boot tlb handlers ]=== */
  1032. /* =================================================[ debugging aids ]=== */
  1033. .align 64
  1034. _immu_trampoline:
  1035. .space 64
  1036. _immu_trampoline_top:
  1037. #define TRAMP_SLOT_0 (0x0)
  1038. #define TRAMP_SLOT_1 (0x4)
  1039. #define TRAMP_SLOT_2 (0x8)
  1040. #define TRAMP_SLOT_3 (0xc)
  1041. #define TRAMP_SLOT_4 (0x10)
  1042. #define TRAMP_SLOT_5 (0x14)
  1043. #define TRAMP_FRAME_SIZE (0x18)
  1044. ENTRY(_immu_trampoline_workaround)
  1045. // r2 EEA
  1046. // r6 is physical EEA
  1047. tophys(r6,r2)
  1048. LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
  1049. tophys (r3,r5) // r3 is trampoline (physical)
  1050. LOAD_SYMBOL_2_GPR(r4,0x15000000)
  1051. l.sw TRAMP_SLOT_0(r3),r4
  1052. l.sw TRAMP_SLOT_1(r3),r4
  1053. l.sw TRAMP_SLOT_4(r3),r4
  1054. l.sw TRAMP_SLOT_5(r3),r4
  1055. // EPC = EEA - 0x4
  1056. l.lwz r4,0x0(r6) // load op @ EEA + 0x0 (fc address)
  1057. l.sw TRAMP_SLOT_3(r3),r4 // store it to _immu_trampoline_data
  1058. l.lwz r4,-0x4(r6) // load op @ EEA - 0x4 (f8 address)
  1059. l.sw TRAMP_SLOT_2(r3),r4 // store it to _immu_trampoline_data
  1060. l.srli r5,r4,26 // check opcode for write access
  1061. l.sfeqi r5,0 // l.j
  1062. l.bf 0f
  1063. l.sfeqi r5,0x11 // l.jr
  1064. l.bf 1f
  1065. l.sfeqi r5,1 // l.jal
  1066. l.bf 2f
  1067. l.sfeqi r5,0x12 // l.jalr
  1068. l.bf 3f
  1069. l.sfeqi r5,3 // l.bnf
  1070. l.bf 4f
  1071. l.sfeqi r5,4 // l.bf
  1072. l.bf 5f
  1073. 99:
  1074. l.nop
  1075. l.j 99b // should never happen
  1076. l.nop 1
  1077. // r2 is EEA
  1078. // r3 is trampoline address (physical)
  1079. // r4 is instruction
  1080. // r6 is physical(EEA)
  1081. //
  1082. // r5
  1083. 2: // l.jal
  1084. /* 19 20 aa aa l.movhi r9,0xaaaa
  1085. * a9 29 bb bb l.ori r9,0xbbbb
  1086. *
  1087. * where 0xaaaabbbb is EEA + 0x4 shifted right 2
  1088. */
  1089. l.addi r6,r2,0x4 // this is 0xaaaabbbb
  1090. // l.movhi r9,0xaaaa
  1091. l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9
  1092. l.sh (TRAMP_SLOT_0+0x0)(r3),r5
  1093. l.srli r5,r6,16
  1094. l.sh (TRAMP_SLOT_0+0x2)(r3),r5
  1095. // l.ori r9,0xbbbb
  1096. l.ori r5,r0,0xa929 // 0xa929 == l.ori r9
  1097. l.sh (TRAMP_SLOT_1+0x0)(r3),r5
  1098. l.andi r5,r6,0xffff
  1099. l.sh (TRAMP_SLOT_1+0x2)(r3),r5
  1100. /* falthrough, need to set up new jump offset */
  1101. 0: // l.j
  1102. l.slli r6,r4,6 // original offset shifted left 6 - 2
  1103. // l.srli r6,r6,6 // original offset shifted right 2
  1104. l.slli r4,r2,4 // old jump position: EEA shifted left 4
  1105. // l.srli r4,r4,6 // old jump position: shifted right 2
  1106. l.addi r5,r3,0xc // new jump position (physical)
  1107. l.slli r5,r5,4 // new jump position: shifted left 4
  1108. // calculate new jump offset
  1109. // new_off = old_off + (old_jump - new_jump)
  1110. l.sub r5,r4,r5 // old_jump - new_jump
  1111. l.add r5,r6,r5 // orig_off + (old_jump - new_jump)
  1112. l.srli r5,r5,6 // new offset shifted right 2
  1113. // r5 is new jump offset
  1114. // l.j has opcode 0x0...
  1115. l.sw TRAMP_SLOT_2(r3),r5 // write it back
  1116. l.j trampoline_out
  1117. l.nop
  1118. /* ----------------------------- */
  1119. 3: // l.jalr
  1120. /* 19 20 aa aa l.movhi r9,0xaaaa
  1121. * a9 29 bb bb l.ori r9,0xbbbb
  1122. *
  1123. * where 0xaaaabbbb is EEA + 0x4 shifted right 2
  1124. */
  1125. l.addi r6,r2,0x4 // this is 0xaaaabbbb
  1126. // l.movhi r9,0xaaaa
  1127. l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9
  1128. l.sh (TRAMP_SLOT_0+0x0)(r3),r5
  1129. l.srli r5,r6,16
  1130. l.sh (TRAMP_SLOT_0+0x2)(r3),r5
  1131. // l.ori r9,0xbbbb
  1132. l.ori r5,r0,0xa929 // 0xa929 == l.ori r9
  1133. l.sh (TRAMP_SLOT_1+0x0)(r3),r5
  1134. l.andi r5,r6,0xffff
  1135. l.sh (TRAMP_SLOT_1+0x2)(r3),r5
  1136. l.lhz r5,(TRAMP_SLOT_2+0x0)(r3) // load hi part of jump instruction
  1137. l.andi r5,r5,0x3ff // clear out opcode part
  1138. l.ori r5,r5,0x4400 // opcode changed from l.jalr -> l.jr
  1139. l.sh (TRAMP_SLOT_2+0x0)(r3),r5 // write it back
  1140. /* falthrough */
  1141. 1: // l.jr
  1142. l.j trampoline_out
  1143. l.nop
  1144. /* ----------------------------- */
  1145. 4: // l.bnf
  1146. 5: // l.bf
  1147. l.slli r6,r4,6 // original offset shifted left 6 - 2
  1148. // l.srli r6,r6,6 // original offset shifted right 2
  1149. l.slli r4,r2,4 // old jump position: EEA shifted left 4
  1150. // l.srli r4,r4,6 // old jump position: shifted right 2
  1151. l.addi r5,r3,0xc // new jump position (physical)
  1152. l.slli r5,r5,4 // new jump position: shifted left 4
  1153. // calculate new jump offset
  1154. // new_off = old_off + (old_jump - new_jump)
  1155. l.add r6,r6,r4 // (orig_off + old_jump)
  1156. l.sub r6,r6,r5 // (orig_off + old_jump) - new_jump
  1157. l.srli r6,r6,6 // new offset shifted right 2
  1158. // r6 is new jump offset
  1159. l.lwz r4,(TRAMP_SLOT_2+0x0)(r3) // load jump instruction
  1160. l.srli r4,r4,16
  1161. l.andi r4,r4,0xfc00 // get opcode part
  1162. l.slli r4,r4,16
  1163. l.or r6,r4,r6 // l.b(n)f new offset
  1164. l.sw TRAMP_SLOT_2(r3),r6 // write it back
  1165. /* we need to add l.j to EEA + 0x8 */
  1166. tophys (r4,r2) // may not be needed (due to shifts down_
  1167. l.addi r4,r4,(0x8 - 0x8) // jump target = r2 + 0x8 (compensate for 0x8)
  1168. // jump position = r5 + 0x8 (0x8 compensated)
  1169. l.sub r4,r4,r5 // jump offset = target - new_position + 0x8
  1170. l.slli r4,r4,4 // the amount of info in imediate of jump
  1171. l.srli r4,r4,6 // jump instruction with offset
  1172. l.sw TRAMP_SLOT_4(r3),r4 // write it to 4th slot
  1173. /* fallthrough */
  1174. trampoline_out:
  1175. // set up new EPC to point to our trampoline code
  1176. LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
  1177. l.mtspr r0,r5,SPR_EPCR_BASE
  1178. // immu_trampoline is (4x) CACHE_LINE aligned
  1179. // and only 6 instructions long,
  1180. // so we need to invalidate only 2 lines
  1181. /* Establish cache block size
  1182. If BS=0, 16;
  1183. If BS=1, 32;
  1184. r14 contain block size
  1185. */
  1186. l.mfspr r21,r0,SPR_ICCFGR
  1187. l.andi r21,r21,SPR_ICCFGR_CBS
  1188. l.srli r21,r21,7
  1189. l.ori r23,r0,16
  1190. l.sll r14,r23,r21
  1191. l.mtspr r0,r5,SPR_ICBIR
  1192. l.add r5,r5,r14
  1193. l.mtspr r0,r5,SPR_ICBIR
  1194. l.jr r9
  1195. l.nop
  1196. /*
  1197. * DSCR: prints a string referenced by r3.
  1198. *
  1199. * PRMS: r3 - address of the first character of null
  1200. * terminated string to be printed
  1201. *
  1202. * PREQ: UART at UART_BASE_ADD has to be initialized
  1203. *
  1204. * POST: caller should be aware that r3, r9 are changed
  1205. */
  1206. ENTRY(_emergency_print)
  1207. EMERGENCY_PRINT_STORE_GPR4
  1208. EMERGENCY_PRINT_STORE_GPR5
  1209. EMERGENCY_PRINT_STORE_GPR6
  1210. EMERGENCY_PRINT_STORE_GPR7
  1211. 2:
  1212. l.lbz r7,0(r3)
  1213. l.sfeq r7,r0
  1214. l.bf 9f
  1215. l.nop
  1216. // putc:
  1217. l.movhi r4,hi(UART_BASE_ADD)
  1218. l.addi r6,r0,0x20
  1219. 1: l.lbz r5,5(r4)
  1220. l.andi r5,r5,0x20
  1221. l.sfeq r5,r6
  1222. l.bnf 1b
  1223. l.nop
  1224. l.sb 0(r4),r7
  1225. l.addi r6,r0,0x60
  1226. 1: l.lbz r5,5(r4)
  1227. l.andi r5,r5,0x60
  1228. l.sfeq r5,r6
  1229. l.bnf 1b
  1230. l.nop
  1231. /* next character */
  1232. l.j 2b
  1233. l.addi r3,r3,0x1
  1234. 9:
  1235. EMERGENCY_PRINT_LOAD_GPR7
  1236. EMERGENCY_PRINT_LOAD_GPR6
  1237. EMERGENCY_PRINT_LOAD_GPR5
  1238. EMERGENCY_PRINT_LOAD_GPR4
  1239. l.jr r9
  1240. l.nop
  1241. ENTRY(_emergency_print_nr)
  1242. EMERGENCY_PRINT_STORE_GPR4
  1243. EMERGENCY_PRINT_STORE_GPR5
  1244. EMERGENCY_PRINT_STORE_GPR6
  1245. EMERGENCY_PRINT_STORE_GPR7
  1246. EMERGENCY_PRINT_STORE_GPR8
  1247. l.addi r8,r0,32 // shift register
  1248. 1: /* remove leading zeros */
  1249. l.addi r8,r8,-0x4
  1250. l.srl r7,r3,r8
  1251. l.andi r7,r7,0xf
  1252. /* don't skip the last zero if number == 0x0 */
  1253. l.sfeqi r8,0x4
  1254. l.bf 2f
  1255. l.nop
  1256. l.sfeq r7,r0
  1257. l.bf 1b
  1258. l.nop
  1259. 2:
  1260. l.srl r7,r3,r8
  1261. l.andi r7,r7,0xf
  1262. l.sflts r8,r0
  1263. l.bf 9f
  1264. l.sfgtui r7,0x9
  1265. l.bnf 8f
  1266. l.nop
  1267. l.addi r7,r7,0x27
  1268. 8:
  1269. l.addi r7,r7,0x30
  1270. // putc:
  1271. l.movhi r4,hi(UART_BASE_ADD)
  1272. l.addi r6,r0,0x20
  1273. 1: l.lbz r5,5(r4)
  1274. l.andi r5,r5,0x20
  1275. l.sfeq r5,r6
  1276. l.bnf 1b
  1277. l.nop
  1278. l.sb 0(r4),r7
  1279. l.addi r6,r0,0x60
  1280. 1: l.lbz r5,5(r4)
  1281. l.andi r5,r5,0x60
  1282. l.sfeq r5,r6
  1283. l.bnf 1b
  1284. l.nop
  1285. /* next character */
  1286. l.j 2b
  1287. l.addi r8,r8,-0x4
  1288. 9:
  1289. EMERGENCY_PRINT_LOAD_GPR8
  1290. EMERGENCY_PRINT_LOAD_GPR7
  1291. EMERGENCY_PRINT_LOAD_GPR6
  1292. EMERGENCY_PRINT_LOAD_GPR5
  1293. EMERGENCY_PRINT_LOAD_GPR4
  1294. l.jr r9
  1295. l.nop
  1296. /*
  1297. * This should be used for debugging only.
  1298. * It messes up the Linux early serial output
  1299. * somehow, so use it sparingly and essentially
  1300. * only if you need to debug something that goes wrong
  1301. * before Linux gets the early serial going.
  1302. *
  1303. * Furthermore, you'll have to make sure you set the
 * UART_DIVISOR correctly according to the system
  1305. * clock rate.
  1306. *
  1307. *
  1308. */
  1309. #define SYS_CLK 20000000
  1310. //#define SYS_CLK 1843200
  1311. #define OR32_CONSOLE_BAUD 115200
  1312. #define UART_DIVISOR SYS_CLK/(16*OR32_CONSOLE_BAUD)
  1313. ENTRY(_early_uart_init)
  1314. l.movhi r3,hi(UART_BASE_ADD)
  1315. l.addi r4,r0,0x7
  1316. l.sb 0x2(r3),r4
  1317. l.addi r4,r0,0x0
  1318. l.sb 0x1(r3),r4
  1319. l.addi r4,r0,0x3
  1320. l.sb 0x3(r3),r4
  1321. l.lbz r5,3(r3)
  1322. l.ori r4,r5,0x80
  1323. l.sb 0x3(r3),r4
  1324. l.addi r4,r0,((UART_DIVISOR>>8) & 0x000000ff)
  1325. l.sb UART_DLM(r3),r4
  1326. l.addi r4,r0,((UART_DIVISOR) & 0x000000ff)
  1327. l.sb UART_DLL(r3),r4
  1328. l.sb 0x3(r3),r5
  1329. l.jr r9
  1330. l.nop
  1331. _string_copying_linux:
  1332. .string "\n\n\n\n\n\rCopying Linux... \0"
  1333. _string_ok_booting:
  1334. .string "Ok, booting the kernel.\n\r\0"
  1335. _string_unhandled_exception:
  1336. .string "\n\rRunarunaround: Unhandled exception 0x\0"
  1337. _string_epc_prefix:
  1338. .string ": EPC=0x\0"
  1339. _string_nl:
  1340. .string "\n\r\0"
  1341. .global _string_esr_irq_bug
  1342. _string_esr_irq_bug:
  1343. .string "\n\rESR external interrupt bug, for details look into entry.S\n\r\0"
  1344. /* ========================================[ page aligned structures ]=== */
  1345. /*
  1346. * .data section should be page aligned
  1347. * (look into arch/or32/kernel/vmlinux.lds)
  1348. */
  1349. .section .data,"aw"
  1350. .align 8192
  1351. .global empty_zero_page
  1352. empty_zero_page:
  1353. .space 8192
  1354. .global swapper_pg_dir
  1355. swapper_pg_dir:
  1356. .space 8192
  1357. .global _unhandled_stack
  1358. _unhandled_stack:
  1359. .space 8192
  1360. _unhandled_stack_top:
  1361. /* ============================================================[ EOF ]=== */