/* (web-page header and line-number extraction residue removed) */
  1. /* entry.S: FR-V entry
  2. *
  3. * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. *
  11. *
  12. * Entry to the kernel is "interesting":
  13. * (1) There are no stack pointers, not even for the kernel
  14. * (2) General Registers should not be clobbered
  15. * (3) There are no kernel-only data registers
  16. * (4) Since all addressing modes are wrt to a General Register, no global
  17. * variables can be reached
  18. *
  19. * We deal with this by declaring that we shall kill GR28 on entering the
  20. * kernel from userspace
  21. *
  22. * However, since break interrupts can interrupt the CPU even when PSR.ET==0,
  23. * they can't rely on GR28 to be anything useful, and so need to clobber a
  24. * separate register (GR31). Break interrupts are managed in break.S
  25. *
  26. * GR29 _is_ saved, and holds the current task pointer globally
  27. *
  28. */
  29. #include <linux/linkage.h>
  30. #include <asm/thread_info.h>
  31. #include <asm/setup.h>
  32. #include <asm/segment.h>
  33. #include <asm/ptrace.h>
  34. #include <asm/errno.h>
  35. #include <asm/cache.h>
  36. #include <asm/spr-regs.h>
  37. #define nr_syscalls ((syscall_table_size)/4)
        .section .text..entry
        .balign 4

# LEDS: debug-trace macro — writes ~\val to 0xe1200004 (presumably a board
# LED/debug register — confirm against the board docs) so entry-path progress
# can be watched.  The whole body is commented out for production; uncomment
# to re-enable the trace.  Clobbers GR30/GR31 when enabled.
.macro LEDS val
#	sethi.p		%hi(0xe1200004),gr30
#	setlo		%lo(0xe1200004),gr30
#	setlos		#~\val,gr31
#	st		gr31,@(gr30,gr0)
#	sethi.p		%hi(0xffc00100),gr30
#	setlo		%lo(0xffc00100),gr30
#	sth		gr0,@(gr30,gr0)
#	membar
.endm

# LEDS32: as LEDS, but displays the 32-bit value already in GR31
# (also disabled; clobbers GR30/GR31 when enabled).
.macro LEDS32
#	not		gr31,gr31
#	sethi.p		%hi(0xe1200004),gr30
#	setlo		%lo(0xe1200004),gr30
#	st.p		gr31,@(gr30,gr0)
#	srli		gr31,#16,gr31
#	sethi.p		%hi(0xffc00100),gr30
#	setlo		%lo(0xffc00100),gr30
#	sth		gr31,@(gr30,gr0)
#	membar
.endm
###############################################################################
#
# entry point for External interrupts received whilst executing userspace code
#
###############################################################################
	.globl		__entry_uspace_external_interrupt
	.type		__entry_uspace_external_interrupt,@function
__entry_uspace_external_interrupt:
	LEDS		0x6200

	# coming from userspace, GR28 is ours to clobber (see file header);
	# point it at the kernel exception frame
	sethi.p		%hi(__kernel_frame0_ptr),gr28
	setlo		%lo(__kernel_frame0_ptr),gr28
	ldi		@(gr28,#0),gr28

	# handle h/w single-step through exceptions
	sti		gr0,@(gr28,#REG__STATUS)

	.globl		__entry_uspace_external_interrupt_reentry
__entry_uspace_external_interrupt_reentry:
	LEDS		0x6201

	# preload (dcpl) the REG__END bytes of frame at GR28 into the D-cache
	setlos		#REG__END,gr30
	dcpl		gr28,gr30,#0

	# finish building the exception frame
	sti		sp,  @(gr28,#REG_SP)
	stdi		gr2, @(gr28,#REG_GR(2))
	stdi		gr4, @(gr28,#REG_GR(4))
	stdi		gr6, @(gr28,#REG_GR(6))
	stdi		gr8, @(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr0, @(gr28,#REG_GR(28))	; GR28 was clobbered on entry
	sti		gr29,@(gr28,#REG_GR(29))
	stdi.p		gr30,@(gr28,#REG_GR(30))

	# set up the kernel stack pointer
	ori		gr28,0,sp

	# capture the control registers describing the interrupted context
	movsg		tbr ,gr20
	movsg		psr ,gr22
	movsg		pcsr,gr21
	movsg		isr ,gr23
	movsg		ccr ,gr24
	movsg		cccr,gr25
	movsg		lr  ,gr26
	movsg		lcr ,gr27

	setlos.p	#-1,gr4			; REG_SYSCALLNO = -1: not in a syscall
	andi		gr22,#PSR_PS,gr5	/* try to rebuild original PSR value */
	andi.p		gr22,#~(PSR_PS|PSR_S),gr6
	slli		gr5,#1,gr5		; old PS becomes the saved S bit
	or		gr6,gr5,gr5
	andi		gr5,#~PSR_ET,gr5

	sti		gr20,@(gr28,#REG_TBR)
	sti		gr21,@(gr28,#REG_PC)
	sti		gr5 ,@(gr28,#REG_PSR)
	sti		gr23,@(gr28,#REG_ISR)
	stdi		gr24,@(gr28,#REG_CCR)
	stdi		gr26,@(gr28,#REG_LR)
	sti		gr4 ,@(gr28,#REG_SYSCALLNO)

	movsg		iacc0h,gr4
	movsg		iacc0l,gr5
	stdi		gr4,@(gr28,#REG_IACC0)

	movsg		gner0,gr4
	movsg		gner1,gr5
	stdi.p		gr4,@(gr28,#REG_GNER0)

	# interrupts start off fully disabled in the interrupt handler
	subcc		gr0,gr0,gr0,icc2	/* set Z and clear C */

	# set up kernel global registers
	sethi.p		%hi(__kernel_current_task),gr5
	setlo		%lo(__kernel_current_task),gr5
	sethi.p		%hi(_gp),gr16
	setlo		%lo(_gp),gr16
	ldi		@(gr5,#0),gr29
	ldi.p		@(gr29,#4),gr15		; __current_thread_info = current->thread_info

	# make sure we (the kernel) get div-zero and misalignment exceptions
	setlos		#ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
	movgs		gr5,isr

	# switch to the kernel trap table
	sethi.p		%hi(__entry_kerneltrap_table),gr6
	setlo		%lo(__entry_kerneltrap_table),gr6
	movgs		gr6,tbr

	# set the return address
	sethi.p		%hi(__entry_return_from_user_interrupt),gr4
	setlo		%lo(__entry_return_from_user_interrupt),gr4
	movgs		gr4,lr

	# raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_PIL_14,gr4
	movgs		gr4,psr
	ori		gr4,#PSR_PIL_14|PSR_ET,gr4
	movgs		gr4,psr

	LEDS		0x6202
	bra		do_IRQ

	.size		__entry_uspace_external_interrupt,.-__entry_uspace_external_interrupt
###############################################################################
#
# entry point for External interrupts received whilst executing kernel code
# - on arriving here, the following registers should already be set up:
#	GR15	- current thread_info struct pointer
#	GR16	- kernel GP-REL pointer
#	GR29	- current task struct pointer
#	TBR	- kernel trap vector table
#	ISR	- kernel's preferred integer controls
#
###############################################################################
	.globl		__entry_kernel_external_interrupt
	.type		__entry_kernel_external_interrupt,@function
__entry_kernel_external_interrupt:
	LEDS		0x6210
//	sub		sp,gr15,gr31
//	LEDS32

	# set up the stack pointer: push a REG__END-byte frame on the kernel stack
	or.p		sp,gr0,gr30		; gr30 = old SP
	subi		sp,#REG__END,sp
	sti		gr30,@(sp,#REG_SP)

	# handle h/w single-step through exceptions
	sti		gr0,@(sp,#REG__STATUS)

	.globl		__entry_kernel_external_interrupt_reentry
__entry_kernel_external_interrupt_reentry:
	LEDS		0x6211

	# set up the exception frame (preload its cache lines first)
	setlos		#REG__END,gr30
	dcpl		sp,gr30,#0

	sti.p		gr28,@(sp,#REG_GR(28))
	ori		sp,0,gr28		; gr28 = frame pointer

	# finish building the exception frame
	stdi		gr2,@(gr28,#REG_GR(2))
	stdi		gr4,@(gr28,#REG_GR(4))
	stdi		gr6,@(gr28,#REG_GR(6))
	stdi		gr8,@(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr29,@(gr28,#REG_GR(29))
	stdi.p		gr30,@(gr28,#REG_GR(30))

	# note virtual interrupts will be fully enabled upon return
	subicc		gr0,#1,gr0,icc2		/* clear Z, set C */

	movsg		tbr ,gr20
	movsg		psr ,gr22
	movsg		pcsr,gr21
	movsg		isr ,gr23
	movsg		ccr ,gr24
	movsg		cccr,gr25
	movsg		lr  ,gr26
	movsg		lcr ,gr27

	setlos.p	#-1,gr4			; REG_SYSCALLNO = -1: not in a syscall
	andi		gr22,#PSR_PS,gr5	/* try to rebuild original PSR value */
	andi.p		gr22,#~(PSR_PS|PSR_S),gr6
	slli		gr5,#1,gr5
	or		gr6,gr5,gr5
	andi.p		gr5,#~PSR_ET,gr5

	# set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
	# - for an explanation of how it works, see: Documentation/frv/atomic-ops.txt
	andi		gr25,#~0xc0,gr25

	sti		gr20,@(gr28,#REG_TBR)
	sti		gr21,@(gr28,#REG_PC)
	sti		gr5 ,@(gr28,#REG_PSR)
	sti		gr23,@(gr28,#REG_ISR)
	stdi		gr24,@(gr28,#REG_CCR)
	stdi		gr26,@(gr28,#REG_LR)
	sti		gr4 ,@(gr28,#REG_SYSCALLNO)

	movsg		iacc0h,gr4
	movsg		iacc0l,gr5
	stdi		gr4,@(gr28,#REG_IACC0)

	movsg		gner0,gr4
	movsg		gner1,gr5
	stdi.p		gr4,@(gr28,#REG_GNER0)

	# interrupts start off fully disabled in the interrupt handler
	subcc		gr0,gr0,gr0,icc2	/* set Z and clear C */

	# set the return address
	sethi.p		%hi(__entry_return_from_kernel_interrupt),gr4
	setlo		%lo(__entry_return_from_kernel_interrupt),gr4
	movgs		gr4,lr

	# clear power-saving mode flags
	movsg		hsr0,gr4
	andi		gr4,#~HSR0_PDM,gr4
	movgs		gr4,hsr0

	# raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_PIL_14,gr4
	movgs		gr4,psr
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr

	LEDS		0x6212
	bra		do_IRQ

	.size		__entry_kernel_external_interrupt,.-__entry_kernel_external_interrupt
###############################################################################
#
# deal with interrupts that were actually virtually disabled
# - we need to really disable them, flag the fact and return immediately
# - if you change this, you must alter break.S also
#
###############################################################################
	.balign		L1_CACHE_BYTES
	.globl		__entry_kernel_external_interrupt_virtually_disabled
	.type		__entry_kernel_external_interrupt_virtually_disabled,@function
__entry_kernel_external_interrupt_virtually_disabled:
	# really mask external interrupts in the PSR (PIL -> 14)
	movsg		psr,gr30
	andi		gr30,#~PSR_PIL,gr30
	ori		gr30,#PSR_PIL_14,gr30	; debugging interrupts only
	movgs		gr30,psr

	# ICC2: Z stays set (still virtually disabled), C cleared flags the
	# deferred interrupt for the virtual-reenable path below
	subcc		gr0,gr0,gr0,icc2	; leave Z set, clear C
	rett		#0

	.size		__entry_kernel_external_interrupt_virtually_disabled,.-__entry_kernel_external_interrupt_virtually_disabled
###############################################################################
#
# deal with re-enablement of interrupts that were pending when virtually re-enabled
# - set ICC2.C, re-enable the real interrupts and return
# - we can clear ICC2.Z because we shouldn't be here if it's not 0 [due to TIHI]
# - if you change this, you must alter break.S also
#
###############################################################################
	.balign		L1_CACHE_BYTES
	.globl		__entry_kernel_external_interrupt_virtual_reenable
	.type		__entry_kernel_external_interrupt_virtual_reenable,@function
__entry_kernel_external_interrupt_virtual_reenable:
	movsg		psr,gr30
	andi		gr30,#~PSR_PIL,gr30	; re-enable interrupts
	movgs		gr30,psr

	subicc		gr0,#1,gr0,icc2		; clear Z, set C
	rett		#0

	.size		__entry_kernel_external_interrupt_virtual_reenable,.-__entry_kernel_external_interrupt_virtual_reenable
###############################################################################
#
# entry point for Software and Progam interrupts generated whilst executing userspace code
#
###############################################################################
	.globl		__entry_uspace_softprog_interrupt
	.type		__entry_uspace_softprog_interrupt,@function
	.globl		__entry_uspace_handle_mmu_fault
__entry_uspace_softprog_interrupt:
	LEDS		0x6000
#ifdef CONFIG_MMU
	# stash the fault address in SCR2 (EAR0 is clobbered by ICI & ICEF
	# insns on FR451 - see __entry_common's register contract)
	movsg		ear0,gr28
__entry_uspace_handle_mmu_fault:
	movgs		gr28,scr2
#endif
	# coming from userspace: point GR28 at the kernel exception frame
	sethi.p		%hi(__kernel_frame0_ptr),gr28
	setlo		%lo(__kernel_frame0_ptr),gr28
	ldi		@(gr28,#0),gr28

	# handle h/w single-step through exceptions
	sti		gr0,@(gr28,#REG__STATUS)

	.globl		__entry_uspace_softprog_interrupt_reentry
__entry_uspace_softprog_interrupt_reentry:
	LEDS		0x6001

	# preload the frame's cache lines
	setlos		#REG__END,gr30
	dcpl		gr28,gr30,#0

	# set up the kernel stack pointer
	sti.p		sp,@(gr28,#REG_SP)
	ori		gr28,0,sp
	sti		gr0,@(gr28,#REG_GR(28))		; GR28 was clobbered on entry

	# save the registers __entry_common will itself overwrite
	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))

	movsg		tbr,gr20
	movsg		pcsr,gr21
	movsg		psr,gr22

	# GR23 = return handler for __entry_common to install in LR
	sethi.p		%hi(__entry_return_from_user_exception),gr23
	setlo		%lo(__entry_return_from_user_exception),gr23

	bra		__entry_common

	.size		__entry_uspace_softprog_interrupt,.-__entry_uspace_softprog_interrupt
  328. # single-stepping was disabled on entry to a TLB handler that then faulted
  329. #ifdef CONFIG_MMU
  330. .globl __entry_uspace_handle_mmu_fault_sstep
  331. __entry_uspace_handle_mmu_fault_sstep:
  332. movgs gr28,scr2
  333. sethi.p %hi(__kernel_frame0_ptr),gr28
  334. setlo %lo(__kernel_frame0_ptr),gr28
  335. ldi @(gr28,#0),gr28
  336. # flag single-step re-enablement
  337. sti gr0,@(gr28,#REG__STATUS)
  338. bra __entry_uspace_softprog_interrupt_reentry
  339. #endif
###############################################################################
#
# entry point for Software and Progam interrupts generated whilst executing kernel code
#
###############################################################################
	.globl		__entry_kernel_softprog_interrupt
	.type		__entry_kernel_softprog_interrupt,@function
__entry_kernel_softprog_interrupt:
	LEDS		0x6004
#ifdef CONFIG_MMU
	# stash the fault address in SCR2 (EAR0 is clobbered by ICI & ICEF
	# insns on FR451 - see __entry_common's register contract)
	movsg		ear0,gr30
	movgs		gr30,scr2
#endif

	.globl		__entry_kernel_handle_mmu_fault
__entry_kernel_handle_mmu_fault:
	# set up the stack pointer; the old SP is stored at both REG_SP and
	# REG_SP-4 so that whichever word lands on REG_SP after the 8-byte
	# alignment below holds it (it is reconstructed at the reentry point)
	subi		sp,#REG__END,sp
	sti		sp,@(sp,#REG_SP)
	sti		sp,@(sp,#REG_SP-4)
	andi		sp,#~7,sp

	# handle h/w single-step through exceptions
	sti		gr0,@(sp,#REG__STATUS)

	.globl		__entry_kernel_softprog_interrupt_reentry
__entry_kernel_softprog_interrupt_reentry:
	LEDS		0x6005

	# preload the frame's cache lines
	setlos		#REG__END,gr30
	dcpl		sp,gr30,#0

	# set up the exception frame
	sti.p		gr28,@(sp,#REG_GR(28))
	ori		sp,0,gr28		; gr28 = frame pointer

	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))

	ldi		@(sp,#REG_SP),gr22	/* reconstruct the old SP */
	addi		gr22,#REG__END,gr22
	sti		gr22,@(sp,#REG_SP)

	# set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
	# - for an explanation of how it works, see: Documentation/frv/atomic-ops.txt
	movsg		cccr,gr20
	andi		gr20,#~0xc0,gr20
	movgs		gr20,cccr

	movsg		tbr,gr20
	movsg		pcsr,gr21
	movsg		psr,gr22

	# GR23 = return handler for __entry_common to install in LR
	sethi.p		%hi(__entry_return_from_kernel_exception),gr23
	setlo		%lo(__entry_return_from_kernel_exception),gr23

	bra		__entry_common

	.size		__entry_kernel_softprog_interrupt,.-__entry_kernel_softprog_interrupt
# single-stepping was disabled on entry to a TLB handler that then faulted
# - as __entry_kernel_handle_mmu_fault, but records that single-stepping must
#   be re-enabled on exception return by setting REG__STATUS_STEP
#ifdef CONFIG_MMU
	.globl		__entry_kernel_handle_mmu_fault_sstep
__entry_kernel_handle_mmu_fault_sstep:
	# set up the stack pointer (old SP stored twice to survive the
	# 8-byte alignment - see __entry_kernel_handle_mmu_fault)
	subi		sp,#REG__END,sp
	sti		sp,@(sp,#REG_SP)
	sti		sp,@(sp,#REG_SP-4)
	andi		sp,#~7,sp

	# flag single-step re-enablement
	sethi		#REG__STATUS_STEP,gr30
	sti		gr30,@(sp,#REG__STATUS)

	bra		__entry_kernel_softprog_interrupt_reentry
#endif
###############################################################################
#
# the rest of the kernel entry point code
# - on arriving here, the following registers should be set up:
#	GR1	- kernel stack pointer
#	GR7	- syscall number (trap 0 only)
#	GR8-13	- syscall args (trap 0 only)
#	GR20	- saved TBR
#	GR21	- saved PC
#	GR22	- saved PSR
#	GR23	- return handler address
#	GR28	- exception frame on stack
#	SCR2	- saved EAR0 where applicable (clobbered by ICI & ICEF insns on FR451)
#	PSR	- PSR.S 1, PSR.ET 0
#
###############################################################################
	.globl		__entry_common
	.type		__entry_common,@function
__entry_common:
	LEDS		0x6008

	# finish building the exception frame
	stdi		gr2,@(gr28,#REG_GR(2))
	stdi		gr4,@(gr28,#REG_GR(4))
	stdi		gr6,@(gr28,#REG_GR(6))
	stdi		gr8,@(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr29,@(gr28,#REG_GR(29))
	stdi		gr30,@(gr28,#REG_GR(30))

	# capture remaining control regs and install the return handler in LR
	movsg		lcr ,gr27
	movsg		lr  ,gr26
	movgs		gr23,lr
	movsg		cccr,gr25
	movsg		ccr ,gr24
	movsg		isr ,gr23

	setlos.p	#-1,gr4			; REG_SYSCALLNO = -1: not in a syscall
	andi		gr22,#PSR_PS,gr5	/* try to rebuild original PSR value */
	andi.p		gr22,#~(PSR_PS|PSR_S),gr6
	slli		gr5,#1,gr5
	or		gr6,gr5,gr5
	andi		gr5,#~PSR_ET,gr5

	sti		gr20,@(gr28,#REG_TBR)
	sti		gr21,@(gr28,#REG_PC)
	sti		gr5 ,@(gr28,#REG_PSR)
	sti		gr23,@(gr28,#REG_ISR)
	stdi		gr24,@(gr28,#REG_CCR)
	stdi		gr26,@(gr28,#REG_LR)
	sti		gr4 ,@(gr28,#REG_SYSCALLNO)

	movsg		iacc0h,gr4
	movsg		iacc0l,gr5
	stdi		gr4,@(gr28,#REG_IACC0)

	movsg		gner0,gr4
	movsg		gner1,gr5
	stdi.p		gr4,@(gr28,#REG_GNER0)

	# set up virtual interrupt disablement
	subicc		gr0,#1,gr0,icc2		/* clear Z flag, set C flag */

	# set up kernel global registers
	sethi.p		%hi(__kernel_current_task),gr5
	setlo		%lo(__kernel_current_task),gr5
	sethi.p		%hi(_gp),gr16
	setlo		%lo(_gp),gr16
	ldi		@(gr5,#0),gr29
	ldi		@(gr29,#4),gr15		; __current_thread_info = current->thread_info

	# switch to the kernel trap table
	sethi.p		%hi(__entry_kerneltrap_table),gr6
	setlo		%lo(__entry_kerneltrap_table),gr6
	movgs		gr6,tbr

	# make sure we (the kernel) get div-zero and misalignment exceptions
	setlos		#ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
	movgs		gr5,isr

	# clear power-saving mode flags
	movsg		hsr0,gr4
	andi		gr4,#~HSR0_PDM,gr4
	movgs		gr4,hsr0

	# multiplex again using old TBR as a guide
	# (extract the trap-type field and index __entry_vector_table with it)
	setlos.p	#TBR_TT,gr3
	sethi		%hi(__entry_vector_table),gr6
	and.p		gr20,gr3,gr5
	setlo		%lo(__entry_vector_table),gr6
	srli		gr5,#2,gr5
	ld		@(gr5,gr6),gr5

	LEDS		0x6009
	jmpl		@(gr5,gr0)

	.size		__entry_common,.-__entry_common
###############################################################################
#
# handle instruction MMU fault
#
###############################################################################
#ifdef CONFIG_MMU
	.globl		__entry_insn_mmu_fault
__entry_insn_mmu_fault:
	LEDS		0x6010

	# args: gr8 = 0 (instruction fault), gr9 = ESR0, gr10 = saved EAR0
	setlos		#0,gr8
	movsg		esr0,gr9
	movsg		scr2,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr

	sethi.p		%hi(do_page_fault),gr5
	setlo		%lo(do_page_fault),gr5
	jmpl		@(gr5,gr0)	; call do_page_fault(0,esr0,ear0)
#endif
###############################################################################
#
# handle instruction access error
#
###############################################################################
	.globl		__entry_insn_access_error
__entry_insn_access_error:
	LEDS		0x6011
	sethi.p		%hi(insn_access_error),gr5
	setlo		%lo(insn_access_error),gr5
	movsg		esfr1,gr8
	movsg		epcr0,gr9
	movsg		esr0,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call insn_access_error(esfr1,epcr0,esr0)
###############################################################################
#
# handle various instructions of dubious legality
#
###############################################################################
	.globl		__entry_unsupported_trap
	.globl		__entry_illegal_instruction
	.globl		__entry_privileged_instruction
	.globl		__entry_debug_exception
__entry_unsupported_trap:
	# back the saved PC up by one instruction (4 bytes), presumably so the
	# trap insn itself is indicated in the frame - TODO confirm intent
	subi		gr21,#4,gr21
	sti		gr21,@(gr28,#REG_PC)
__entry_illegal_instruction:
__entry_privileged_instruction:
__entry_debug_exception:
	LEDS		0x6012
	sethi.p		%hi(illegal_instruction),gr5
	setlo		%lo(illegal_instruction),gr5
	movsg		esfr1,gr8
	movsg		epcr0,gr9
	movsg		esr0,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call ill_insn(esfr1,epcr0,esr0)
###############################################################################
#
# handle atomic operation emulation for userspace
#
###############################################################################
	.globl		__entry_atomic_op
__entry_atomic_op:
	LEDS		0x6012
	sethi.p		%hi(atomic_operation),gr5
	setlo		%lo(atomic_operation),gr5
	movsg		esfr1,gr8
	movsg		epcr0,gr9
	movsg		esr0,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call atomic_operation(esfr1,epcr0,esr0)
###############################################################################
#
# handle media exception
#
###############################################################################
	.globl		__entry_media_exception
__entry_media_exception:
	LEDS		0x6013
	sethi.p		%hi(media_exception),gr5
	setlo		%lo(media_exception),gr5
	movsg		msr0,gr8
	movsg		msr1,gr9

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call media_excep(msr0,msr1)
###############################################################################
#
# handle data MMU fault
# handle data DAT fault (write-protect exception)
#
###############################################################################
#ifdef CONFIG_MMU
	.globl		__entry_data_mmu_fault
__entry_data_mmu_fault:
	.globl		__entry_data_dat_fault
__entry_data_dat_fault:
	LEDS		0x6014

	# args: gr8 = 1 (data fault), gr9 = ESR0, gr10 = saved EAR0
	setlos		#1,gr8
	movsg		esr0,gr9
	movsg		scr2,gr10	; saved EAR0

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr

	sethi.p		%hi(do_page_fault),gr5
	setlo		%lo(do_page_fault),gr5
	jmpl		@(gr5,gr0)	; call do_page_fault(1,esr0,ear0)
#endif
###############################################################################
#
# handle data and instruction access exceptions
#
###############################################################################
	.globl		__entry_insn_access_exception
	.globl		__entry_data_access_exception
__entry_insn_access_exception:
__entry_data_access_exception:
	LEDS		0x6016
	sethi.p		%hi(memory_access_exception),gr5
	setlo		%lo(memory_access_exception),gr5
	movsg		esr0,gr8
	movsg		scr2,gr9	; saved EAR0
	movsg		epcr0,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call memory_access_error(esr0,ear0,epcr0)
###############################################################################
#
# handle data access error
#
###############################################################################
	.globl		__entry_data_access_error
__entry_data_access_error:
	LEDS		0x6016
	sethi.p		%hi(data_access_error),gr5
	setlo		%lo(data_access_error),gr5
	movsg		esfr1,gr8
	movsg		esr15,gr9
	movsg		ear15,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call data_access_error(esfr1,esr15,ear15)
###############################################################################
#
# handle data store error
#
###############################################################################
	.globl		__entry_data_store_error
__entry_data_store_error:
	LEDS		0x6017
	sethi.p		%hi(data_store_error),gr5
	setlo		%lo(data_store_error),gr5
	movsg		esfr1,gr8
	movsg		esr14,gr9

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call data_store_error(esfr1,esr14)
###############################################################################
#
# handle division exception
#
###############################################################################
	.globl		__entry_division_exception
__entry_division_exception:
	LEDS		0x6018
	sethi.p		%hi(division_exception),gr5
	setlo		%lo(division_exception),gr5
	movsg		esfr1,gr8
	movsg		esr0,gr9
	movsg		isr,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call div_excep(esfr1,esr0,isr)
###############################################################################
#
# handle compound exception
#
###############################################################################
	.globl		__entry_compound_exception
__entry_compound_exception:
	LEDS		0x6019
	sethi.p		%hi(compound_exception),gr5
	setlo		%lo(compound_exception),gr5
	movsg		esfr1,gr8
	movsg		esr0,gr9
	movsg		esr14,gr10
	movsg		esr15,gr11
	movsg		msr0,gr12
	movsg		msr1,gr13

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call comp_excep(esfr1,esr0,esr14,esr15,msr0,msr1)
###############################################################################
#
# handle interrupts and NMIs
#
###############################################################################
	.globl		__entry_do_IRQ
__entry_do_IRQ:
	LEDS		0x6020				; debug LED code

	# we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4			; set PSR.ET (exception trap enable)
	movgs		gr4,psr
	bra		do_IRQ				; tail-branch to the C interrupt handler

	.globl		__entry_do_NMI
__entry_do_NMI:
	LEDS		0x6021				; debug LED code

	# we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4			; set PSR.ET (exception trap enable)
	movgs		gr4,psr
	bra		do_NMI				; tail-branch to the C NMI handler
###############################################################################
#
# the return path for a newly forked child process
# - __switch_to() saved the old current pointer in GR8 for us
#
###############################################################################
	.globl		ret_from_fork
ret_from_fork:
	LEDS		0x6100				; debug LED code
	call		schedule_tail
	# fork & co. return 0 to child
	setlos.p	#0,gr8
	bra		__syscall_exit

	.globl		ret_from_kernel_thread
ret_from_kernel_thread:
	lddi.p		@(gr28,#REG_GR(8)),gr20	; pair-load: gr20/gr21 <- frame GR8/GR9
						;   (presumably thread fn arg in gr20, fn ptr in
						;    gr21 — set up by copy_thread; confirm there)
	call		schedule_tail
	calll.p		@(gr21,gr0)		; invoke the function in gr21...
	or		gr20,gr20,gr8		; ...packed insn puts its argument in GR8
	bra		__syscall_exit		; thread fn returned: exit via syscall path
###################################################################################################
#
# Return to user mode is not as complex as all this looks,
# but we want the default path for a system call return to
# go as quickly as possible which is why some of this is
# less clear than it otherwise should be.
#
###################################################################################################
	.balign		L1_CACHE_BYTES
	.globl		system_call
system_call:
	LEDS		0x6101				; debug LED code
	movsg		psr,gr4			; enable exceptions
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr

	# gr28 = register frame, gr7 = syscall number, gr8 = first argument
	sti		gr7,@(gr28,#REG_SYSCALLNO)
	sti.p		gr8,@(gr28,#REG_ORIG_GR8)	; keep original GR8 for syscall restart

	subicc		gr7,#nr_syscalls,gr0,icc0	; range-check the syscall number
	bnc		icc0,#0,__syscall_badsys	; out of range -> -ENOSYS

	ldi		@(gr15,#TI_FLAGS),gr4		; gr15 = thread_info pointer
	andicc		gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
	bne		icc0,#0,__syscall_trace_entry	; ptrace wants to see the entry

__syscall_call:
	slli.p		gr7,#2,gr7			; scale number to byte offset (4 bytes/entry)
	sethi		%hi(sys_call_table),gr5
	setlo		%lo(sys_call_table),gr5
	ld		@(gr5,gr7),gr4			; fetch handler pointer from the table
	calll		@(gr4,gr0)			; call it; falls through to __syscall_exit
###############################################################################
#
# return to interrupted process
#
###############################################################################
__syscall_exit:
	LEDS		0x6300				; debug LED code

	# keep current PSR in GR23
	movsg		psr,gr23

	ldi		@(gr28,#REG_PSR),gr22		; gr22 = PSR value saved in the frame
	sti.p		gr8,@(gr28,#REG_GR(8)) ; save return value

	# rebuild saved psr - execve will change it for init/main.c
	srli		gr22,#1,gr5			; shift S bit down into the PS position
	andi.p		gr22,#~PSR_PS,gr22
	andi		gr5,#PSR_PS,gr5
	or		gr5,gr22,gr22			; PSR.PS <- old PSR.S
	ori.p		gr22,#PSR_S,gr22		; we are in supervisor mode at the RETT

	# make sure we don't miss an interrupt setting need_resched or sigpending between
	# sampling and the RETT
	ori		gr23,#PSR_PIL_14,gr23		; raise interrupt priority level to 14
	movgs		gr23,psr

	ldi		@(gr15,#TI_FLAGS),gr4
	andicc		gr4,#_TIF_ALLWORK_MASK,gr0,icc0
	bne		icc0,#0,__syscall_exit_work	; tracing/signals/resched pending

	# restore all registers and return
__entry_return_direct:
	LEDS		0x6301				; debug LED code

	andi		gr22,#~PSR_ET,gr22		; exceptions off for the restore sequence
	movgs		gr22,psr

	# pull the special registers out of the frame; lddi fills an even/odd pair
	ldi		@(gr28,#REG_ISR),gr23
	lddi		@(gr28,#REG_CCR),gr24		; gr24 = CCR, gr25 = CCCR
	lddi		@(gr28,#REG_LR) ,gr26		; gr26 = LR,  gr27 = LCR
	ldi		@(gr28,#REG_PC) ,gr21
	ldi		@(gr28,#REG_TBR),gr20

	movgs		gr20,tbr
	movgs		gr21,pcsr			; RETT resumes at this address
	movgs		gr23,isr
	movgs		gr24,ccr
	movgs		gr25,cccr
	movgs		gr26,lr
	movgs		gr27,lcr

	lddi		@(gr28,#REG_GNER0),gr4
	movgs		gr4,gner0
	movgs		gr5,gner1

	lddi		@(gr28,#REG_IACC0),gr4
	movgs		gr4,iacc0h
	movgs		gr5,iacc0l

	# restore the general registers in pairs from the frame
	lddi		@(gr28,#REG_GR(4)) ,gr4
	lddi		@(gr28,#REG_GR(6)) ,gr6
	lddi		@(gr28,#REG_GR(8)) ,gr8
	lddi		@(gr28,#REG_GR(10)),gr10
	lddi		@(gr28,#REG_GR(12)),gr12
	lddi		@(gr28,#REG_GR(14)),gr14
	lddi		@(gr28,#REG_GR(16)),gr16
	lddi		@(gr28,#REG_GR(18)),gr18
	lddi		@(gr28,#REG_GR(20)),gr20
	lddi		@(gr28,#REG_GR(22)),gr22
	lddi		@(gr28,#REG_GR(24)),gr24
	lddi		@(gr28,#REG_GR(26)),gr26
	ldi		@(gr28,#REG_GR(29)),gr29
	lddi		@(gr28,#REG_GR(30)),gr30

	# check to see if a debugging return is required
	LEDS		0x67f0
	movsg		ccr,gr2				; preserve CCR across the andicc below
	ldi		@(gr28,#REG__STATUS),gr3
	andicc		gr3,#REG__STATUS_STEP,gr0,icc0
	bne		icc0,#0,__entry_return_singlestep
	movgs		gr2,ccr				; restore CCR clobbered by the test

	# gr2/gr3/sp last of all, and gr28 last since it addresses the frame
	ldi		@(gr28,#REG_SP) ,sp
	lddi		@(gr28,#REG_GR(2)) ,gr2
	ldi		@(gr28,#REG_GR(28)),gr28

	LEDS		0x67fe
//	movsg		pcsr,gr31
//	LEDS32

#if 0
	# store the current frame in the workram on the FR451
	movgs		gr28,scr2
	sethi.p		%hi(0xfe800000),gr28
	setlo		%lo(0xfe800000),gr28

	stdi		gr2,@(gr28,#REG_GR(2))
	stdi		gr4,@(gr28,#REG_GR(4))
	stdi		gr6,@(gr28,#REG_GR(6))
	stdi		gr8,@(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr29,@(gr28,#REG_GR(29))
	stdi		gr30,@(gr28,#REG_GR(30))

	movsg		tbr ,gr30
	sti		gr30,@(gr28,#REG_TBR)
	movsg		pcsr,gr30
	sti		gr30,@(gr28,#REG_PC)
	movsg		psr ,gr30
	sti		gr30,@(gr28,#REG_PSR)
	movsg		isr ,gr30
	sti		gr30,@(gr28,#REG_ISR)
	movsg		ccr ,gr30
	movsg		cccr,gr31
	stdi		gr30,@(gr28,#REG_CCR)
	movsg		lr ,gr30
	movsg		lcr ,gr31
	stdi		gr30,@(gr28,#REG_LR)
	sti		gr0 ,@(gr28,#REG_SYSCALLNO)
	movsg		scr2,gr28
#endif

	rett		#0				; return from trap to PCSR

	# return via break.S
__entry_return_singlestep:
	movgs		gr2,ccr				; restore CCR saved above
	lddi		@(gr28,#REG_GR(2)) ,gr2
	ldi		@(gr28,#REG_SP) ,sp
	ldi		@(gr28,#REG_GR(28)),gr28
	LEDS		0x67ff
	break						; hand over to the debug-break handler
	.globl		__entry_return_singlestep_breaks_here
__entry_return_singlestep_breaks_here:
	nop
  896. ###############################################################################
  897. #
  898. # return to a process interrupted in kernel space
  899. # - we need to consider preemption if that is enabled
  900. #
  901. ###############################################################################
  902. .balign L1_CACHE_BYTES
  903. __entry_return_from_kernel_exception:
  904. LEDS 0x6302
  905. movsg psr,gr23
  906. ori gr23,#PSR_PIL_14,gr23
  907. movgs gr23,psr
  908. bra __entry_return_direct
  909. .balign L1_CACHE_BYTES
  910. __entry_return_from_kernel_interrupt:
  911. LEDS 0x6303
  912. movsg psr,gr23
  913. ori gr23,#PSR_PIL_14,gr23
  914. movgs gr23,psr
  915. #ifdef CONFIG_PREEMPT
  916. ldi @(gr15,#TI_PRE_COUNT),gr5
  917. subicc gr5,#0,gr0,icc0
  918. beq icc0,#0,__entry_return_direct
  919. subcc gr0,gr0,gr0,icc2 /* set Z and clear C */
  920. call preempt_schedule_irq
  921. #endif
  922. bra __entry_return_direct
###############################################################################
#
# perform work that needs to be done immediately before resumption
#
###############################################################################
	.globl		__entry_return_from_user_exception
	.balign		L1_CACHE_BYTES
__entry_return_from_user_exception:
	LEDS		0x6501				; debug LED code

__entry_resume_userspace:
	# make sure we don't miss an interrupt setting need_resched or sigpending between
	# sampling and the RETT
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23		; raise IRQ priority level to 14
	movgs		gr23,psr

__entry_return_from_user_interrupt:
	LEDS		0x6402				; debug LED code
	ldi		@(gr15,#TI_FLAGS),gr4		; gr15 = thread_info pointer
	andicc		gr4,#_TIF_WORK_MASK,gr0,icc0
	beq		icc0,#1,__entry_return_direct	; nothing pending: fast exit

__entry_work_pending:
	LEDS		0x6404				; debug LED code
	andicc		gr4,#_TIF_NEED_RESCHED,gr0,icc0
	beq		icc0,#1,__entry_work_notifysig	; no resched: go handle signals

__entry_work_resched:
	LEDS		0x6408				; debug LED code
	movsg		psr,gr23
	andi		gr23,#~PSR_PIL,gr23		; drop the IRQ priority level around schedule()
	movgs		gr23,psr
	call		schedule
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23		; re-raise it before re-sampling the flags
	movgs		gr23,psr

	LEDS		0x6401
	# re-check the work flags; schedule() may have left more to do
	ldi		@(gr15,#TI_FLAGS),gr4
	andicc		gr4,#_TIF_WORK_MASK,gr0,icc0
	beq		icc0,#1,__entry_return_direct
	andicc		gr4,#_TIF_NEED_RESCHED,gr0,icc0
	bne		icc0,#1,__entry_work_resched	; need_resched set again: loop

__entry_work_notifysig:
	LEDS		0x6410				; debug LED code
	ori.p		gr4,#0,gr8			; arg 1: the thread flags (packed w/ call)
	call		do_notify_resume
	bra		__entry_resume_userspace	; re-run the full resume checks

	# perform syscall entry tracing
__syscall_trace_entry:
	LEDS		0x6320				; debug LED code
	call		syscall_trace_entry

	# reload the syscall args the tracer may have rewritten in the frame
	lddi.p		@(gr28,#REG_GR(8)) ,gr8
	ori		gr8,#0,gr7 ; syscall_trace_entry() returned new syscallno
	lddi		@(gr28,#REG_GR(10)),gr10
	lddi.p		@(gr28,#REG_GR(12)),gr12

	subicc		gr7,#nr_syscalls,gr0,icc0	; re-validate the (possibly new) number
	bnc		icc0,#0,__syscall_badsys
	bra		__syscall_call

	# perform syscall exit tracing
__syscall_exit_work:
	LEDS		0x6340				; debug LED code
	andicc		gr22,#PSR_PS,gr0,icc1	; don't handle on return to kernel mode
	andicc.p	gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
	bne		icc1,#0,__entry_return_direct
	beq		icc0,#1,__entry_work_pending	; not tracing: just the normal work loop

	movsg		psr,gr23
	andi		gr23,#~PSR_PIL,gr23	; could let syscall_trace_exit() call schedule()
	movgs		gr23,psr

	call		syscall_trace_exit
	bra		__entry_resume_userspace

__syscall_badsys:
	LEDS		0x6380				; debug LED code
	setlos		#-ENOSYS,gr8			; out-of-range syscall number
	sti		gr8,@(gr28,#REG_GR(8)) ; save return value
	bra		__entry_resume_userspace
###############################################################################
#
# syscall vector table
#
###############################################################################
	.section .rodata
ALIGN
	.globl		sys_call_table
	# One 32-bit handler pointer per slot, indexed by syscall number (see the
	# range check against nr_syscalls in system_call).  sys_ni_syscall fills
	# removed, never-implemented or reserved slots.
sys_call_table:
	.long sys_restart_syscall	/* 0 - old "setup()" system call, used for restarting */
	.long sys_exit
	.long sys_fork
	.long sys_read
	.long sys_write
	.long sys_open		/* 5 */
	.long sys_close
	.long sys_waitpid
	.long sys_creat
	.long sys_link
	.long sys_unlink	/* 10 */
	.long sys_execve
	.long sys_chdir
	.long sys_time
	.long sys_mknod
	.long sys_chmod		/* 15 */
	.long sys_lchown16
	.long sys_ni_syscall	/* old break syscall holder */
	.long sys_stat
	.long sys_lseek
	.long sys_getpid	/* 20 */
	.long sys_mount
	.long sys_oldumount
	.long sys_setuid16
	.long sys_getuid16
	.long sys_ni_syscall	// sys_stime		/* 25 */
	.long sys_ptrace
	.long sys_alarm
	.long sys_fstat
	.long sys_pause
	.long sys_utime		/* 30 */
	.long sys_ni_syscall	/* old stty syscall holder */
	.long sys_ni_syscall	/* old gtty syscall holder */
	.long sys_access
	.long sys_nice
	.long sys_ni_syscall	/* 35 */	/* old ftime syscall holder */
	.long sys_sync
	.long sys_kill
	.long sys_rename
	.long sys_mkdir
	.long sys_rmdir		/* 40 */
	.long sys_dup
	.long sys_pipe
	.long sys_times
	.long sys_ni_syscall	/* old prof syscall holder */
	.long sys_brk		/* 45 */
	.long sys_setgid16
	.long sys_getgid16
	.long sys_ni_syscall	// sys_signal
	.long sys_geteuid16
	.long sys_getegid16	/* 50 */
	.long sys_acct
	.long sys_umount	/* recycled never used phys( */
	.long sys_ni_syscall	/* old lock syscall holder */
	.long sys_ioctl
	.long sys_fcntl		/* 55 */
	.long sys_ni_syscall	/* old mpx syscall holder */
	.long sys_setpgid
	.long sys_ni_syscall	/* old ulimit syscall holder */
	.long sys_ni_syscall	/* old old uname syscall */
	.long sys_umask		/* 60 */
	.long sys_chroot
	.long sys_ustat
	.long sys_dup2
	.long sys_getppid
	.long sys_getpgrp	/* 65 */
	.long sys_setsid
	.long sys_sigaction
	.long sys_ni_syscall	// sys_sgetmask
	.long sys_ni_syscall	// sys_ssetmask
	.long sys_setreuid16	/* 70 */
	.long sys_setregid16
	.long sys_sigsuspend
	.long sys_ni_syscall	// sys_sigpending
	.long sys_sethostname
	.long sys_setrlimit	/* 75 */
	.long sys_ni_syscall	// sys_old_getrlimit
	.long sys_getrusage
	.long sys_gettimeofday
	.long sys_settimeofday
	.long sys_getgroups16	/* 80 */
	.long sys_setgroups16
	.long sys_ni_syscall	/* old_select slot */
	.long sys_symlink
	.long sys_lstat
	.long sys_readlink	/* 85 */
	.long sys_uselib
	.long sys_swapon
	.long sys_reboot
	.long sys_ni_syscall	// old_readdir
	.long sys_ni_syscall	/* 90 */	/* old_mmap slot */
	.long sys_munmap
	.long sys_truncate
	.long sys_ftruncate
	.long sys_fchmod
	.long sys_fchown16	/* 95 */
	.long sys_getpriority
	.long sys_setpriority
	.long sys_ni_syscall	/* old profil syscall holder */
	.long sys_statfs
	.long sys_fstatfs	/* 100 */
	.long sys_ni_syscall	/* ioperm for i386 */
	.long sys_socketcall
	.long sys_syslog
	.long sys_setitimer
	.long sys_getitimer	/* 105 */
	.long sys_newstat
	.long sys_newlstat
	.long sys_newfstat
	.long sys_ni_syscall	/* obsolete olduname( syscall */
	.long sys_ni_syscall	/* iopl for i386 */	/* 110 */
	.long sys_vhangup
	.long sys_ni_syscall	/* obsolete idle( syscall */
	.long sys_ni_syscall	/* vm86old for i386 */
	.long sys_wait4
	.long sys_swapoff	/* 115 */
	.long sys_sysinfo
	.long sys_ipc
	.long sys_fsync
	.long sys_sigreturn
	.long sys_clone		/* 120 */
	.long sys_setdomainname
	.long sys_newuname
	.long sys_ni_syscall	/* old "cacheflush" */
	.long sys_adjtimex
	.long sys_mprotect	/* 125 */
	.long sys_sigprocmask
	.long sys_ni_syscall	/* old "create_module" */
	.long sys_init_module
	.long sys_delete_module
	.long sys_ni_syscall	/* old "get_kernel_syms" */
	.long sys_quotactl
	.long sys_getpgid
	.long sys_fchdir
	.long sys_bdflush
	.long sys_sysfs		/* 135 */
	.long sys_personality
	.long sys_ni_syscall	/* for afs_syscall */
	.long sys_setfsuid16
	.long sys_setfsgid16
	.long sys_llseek	/* 140 */
	.long sys_getdents
	.long sys_select
	.long sys_flock
	.long sys_msync
	.long sys_readv		/* 145 */
	.long sys_writev
	.long sys_getsid
	.long sys_fdatasync
	.long sys_sysctl
	.long sys_mlock		/* 150 */
	.long sys_munlock
	.long sys_mlockall
	.long sys_munlockall
	.long sys_sched_setparam
	.long sys_sched_getparam	/* 155 */
	.long sys_sched_setscheduler
	.long sys_sched_getscheduler
	.long sys_sched_yield
	.long sys_sched_get_priority_max
	.long sys_sched_get_priority_min	/* 160 */
	.long sys_sched_rr_get_interval
	.long sys_nanosleep
	.long sys_mremap
	.long sys_setresuid16
	.long sys_getresuid16	/* 165 */
	.long sys_ni_syscall	/* for vm86 */
	.long sys_ni_syscall	/* Old sys_query_module */
	.long sys_poll
	.long sys_ni_syscall	/* Old nfsservctl */
	.long sys_setresgid16	/* 170 */
	.long sys_getresgid16
	.long sys_prctl
	.long sys_rt_sigreturn
	.long sys_rt_sigaction
	.long sys_rt_sigprocmask	/* 175 */
	.long sys_rt_sigpending
	.long sys_rt_sigtimedwait
	.long sys_rt_sigqueueinfo
	.long sys_rt_sigsuspend
	.long sys_pread64	/* 180 */
	.long sys_pwrite64
	.long sys_chown16
	.long sys_getcwd
	.long sys_capget
	.long sys_capset	/* 185 */
	.long sys_sigaltstack
	.long sys_sendfile
	.long sys_ni_syscall	/* streams1 */
	.long sys_ni_syscall	/* streams2 */
	.long sys_vfork		/* 190 */
	.long sys_getrlimit
	.long sys_mmap2
	.long sys_truncate64
	.long sys_ftruncate64
	.long sys_stat64	/* 195 */
	.long sys_lstat64
	.long sys_fstat64
	.long sys_lchown
	.long sys_getuid
	.long sys_getgid	/* 200 */
	.long sys_geteuid
	.long sys_getegid
	.long sys_setreuid
	.long sys_setregid
	.long sys_getgroups	/* 205 */
	.long sys_setgroups
	.long sys_fchown
	.long sys_setresuid
	.long sys_getresuid
	.long sys_setresgid	/* 210 */
	.long sys_getresgid
	.long sys_chown
	.long sys_setuid
	.long sys_setgid
	.long sys_setfsuid	/* 215 */
	.long sys_setfsgid
	.long sys_pivot_root
	.long sys_mincore
	.long sys_madvise
	.long sys_getdents64	/* 220 */
	.long sys_fcntl64
	.long sys_ni_syscall	/* reserved for TUX */
	.long sys_ni_syscall	/* Reserved for Security */
	.long sys_gettid
	.long sys_readahead	/* 225 */
	.long sys_setxattr
	.long sys_lsetxattr
	.long sys_fsetxattr
	.long sys_getxattr
	.long sys_lgetxattr	/* 230 */
	.long sys_fgetxattr
	.long sys_listxattr
	.long sys_llistxattr
	.long sys_flistxattr
	.long sys_removexattr	/* 235 */
	.long sys_lremovexattr
	.long sys_fremovexattr
	.long sys_tkill
	.long sys_sendfile64
	.long sys_futex		/* 240 */
	.long sys_sched_setaffinity
	.long sys_sched_getaffinity
	.long sys_ni_syscall	//sys_set_thread_area
	.long sys_ni_syscall	//sys_get_thread_area
	.long sys_io_setup	/* 245 */
	.long sys_io_destroy
	.long sys_io_getevents
	.long sys_io_submit
	.long sys_io_cancel
	.long sys_fadvise64	/* 250 */
	.long sys_ni_syscall
	.long sys_exit_group
	.long sys_lookup_dcookie
	.long sys_epoll_create
	.long sys_epoll_ctl	/* 255 */
	.long sys_epoll_wait
	.long sys_remap_file_pages
	.long sys_set_tid_address
	.long sys_timer_create
	.long sys_timer_settime	/* 260 */
	.long sys_timer_gettime
	.long sys_timer_getoverrun
	.long sys_timer_delete
	.long sys_clock_settime
	.long sys_clock_gettime	/* 265 */
	.long sys_clock_getres
	.long sys_clock_nanosleep
	.long sys_statfs64
	.long sys_fstatfs64
	.long sys_tgkill	/* 270 */
	.long sys_utimes
	.long sys_fadvise64_64
	.long sys_ni_syscall	/* sys_vserver */
	.long sys_mbind
	.long sys_get_mempolicy
	.long sys_set_mempolicy
	.long sys_mq_open
	.long sys_mq_unlink
	.long sys_mq_timedsend
	.long sys_mq_timedreceive	/* 280 */
	.long sys_mq_notify
	.long sys_mq_getsetattr
	.long sys_ni_syscall	/* reserved for kexec */
	.long sys_waitid
	.long sys_ni_syscall	/* 285 */	/* available */
	.long sys_add_key
	.long sys_request_key
	.long sys_keyctl
	.long sys_ioprio_set
	.long sys_ioprio_get	/* 290 */
	.long sys_inotify_init
	.long sys_inotify_add_watch
	.long sys_inotify_rm_watch
	.long sys_migrate_pages
	.long sys_openat	/* 295 */
	.long sys_mkdirat
	.long sys_mknodat
	.long sys_fchownat
	.long sys_futimesat
	.long sys_fstatat64	/* 300 */
	.long sys_unlinkat
	.long sys_renameat
	.long sys_linkat
	.long sys_symlinkat
	.long sys_readlinkat	/* 305 */
	.long sys_fchmodat
	.long sys_faccessat
	.long sys_pselect6
	.long sys_ppoll
	.long sys_unshare	/* 310 */
	.long sys_set_robust_list
	.long sys_get_robust_list
	.long sys_splice
	.long sys_sync_file_range
	.long sys_tee		/* 315 */
	.long sys_vmsplice
	.long sys_move_pages
	.long sys_getcpu
	.long sys_epoll_pwait
	.long sys_utimensat	/* 320 */
	.long sys_signalfd
	.long sys_timerfd_create
	.long sys_eventfd
	.long sys_fallocate
	.long sys_timerfd_settime	/* 325 */
	.long sys_timerfd_gettime
	.long sys_signalfd4
	.long sys_eventfd2
	.long sys_epoll_create1
	.long sys_dup3		/* 330 */
	.long sys_pipe2
	.long sys_inotify_init1
	.long sys_preadv
	.long sys_pwritev
	.long sys_rt_tgsigqueueinfo	/* 335 */
	.long sys_perf_event_open
	.long sys_setns

	# number of table entries * 4; used to size nr_syscalls
syscall_table_size = (. - sys_call_table)