/*
 * linux/arch/unicore32/kernel/entry.S
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Low-level vector interface routines
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/unistd.h>
#include <generated/asm-offsets.h>
#include "debug-macro.S"

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@
#define S_OFF		8

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0
#endif
	.endm

	.macro	alignment_trap, rtemp
#ifdef CONFIG_ALIGNMENT_TRAP
	ldw	\rtemp, .LCcralign
	ldw	\rtemp, [\rtemp]
	movc	p0.c1, \rtemp, #0
#endif
	.endm

	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mov	\rtemp, asr
	xor	\rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
	mov.a	asr, \rtemp			@ switch to the SUSR mode
	ldw	sp, [\rd+], #\offset		@ load sp_user
	ldw	lr, [\rd+], #\offset + 4	@ load lr_user
	xor	\rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
	mov.a	asr, \rtemp			@ switch back to the PRIV mode
	.endm

	.macro	priv_exit, rpsr
	mov.a	bsr, \rpsr
	ldm.w	(r0 - r15), [sp]+
	ldm.b	(r16 - pc), [sp]+		@ load r0 - pc, asr
	.endm
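	@ Note: this is assumed to act as a classic exception-return sequence:
	@ bsr is loaded with the saved PSR first, and the final ldm.b copies
	@ bsr back into asr as pc is restored.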
	.macro	restore_user_regs, fast = 0, offset = 0
	ldw	r1, [sp+], #\offset + S_PSR	@ get calling asr
	ldw	lr, [sp+], #\offset + S_PC	@ get pc
	mov.a	bsr, r1				@ save in bsr_priv
	.if	\fast
	add	sp, sp, #\offset + S_R1		@ r0 is syscall return value
	ldm.w	(r1 - r15), [sp]+		@ get calling r1 - r15
	ldur	(r16 - lr), [sp]+		@ get calling r16 - lr
	.else
	ldm.w	(r0 - r15), [sp]+		@ get calling r0 - r15
	ldur	(r16 - lr), [sp]+		@ get calling r16 - lr
	.endif
	nop
	add	sp, sp, #S_FRAME_SIZE - S_R16
	mov.a	pc, lr				@ return
						@ and move bsr_priv into asr
	.endm

	.macro	get_thread_info, rd
	mov	\rd, sp >> #13
	mov	\rd, \rd << #13
	.endm
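	@ The shift-down/shift-up pair above clears the low 13 bits of sp;
	@ this assumes an 8 KB (1 << 13) THREAD_SIZE with thread_info sitting
	@ at the base of the kernel stack.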
	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
	ldw	\base, =(PKUNITY_INTC_BASE)
	ldw	\irqstat, [\base+], #0xC	@ INTC_ICIP
	ldw	\tmp, [\base+], #0x4		@ INTC_ICMR
	and.a	\irqstat, \irqstat, \tmp
	beq	1001f
	cntlz	\irqnr, \irqstat
	rsub	\irqnr, \irqnr, #31
1001:	/* EQ will be set if no irqs pending */
	.endm
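	@ The cntlz/rsub pair above appears to convert the highest set bit of
	@ the pending-and-unmasked status word into its bit index (31 minus
	@ the leading-zero count), so the highest-numbered pending interrupt
	@ is dispatched first; Z stays set when nothing is pending.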
#ifdef CONFIG_DEBUG_LL
	.macro	printreg, reg, temp
	adr	\temp, 901f
	stm	(r0-r3), [\temp]+
	stw	lr, [\temp+], #0x10
	mov	r0, \reg
	b.l	printhex8
	mov	r0, #':'
	b.l	printch
	mov	r0, pc
	b.l	printhex8
	adr	r0, 902f
	b.l	printascii
	adr	\temp, 901f
	ldm	(r0-r3), [\temp]+
	ldw	lr, [\temp+], #0x10
	b	903f
901:	.word	0, 0, 0, 0, 0		@ r0-r3, lr
902:	.asciz	": epip4d\n"
	.align
903:
	.endm
#endif

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r21		@ syscall number
tbl	.req	r22		@ syscall table pointer
why	.req	r22		@ Linux syscall (!= 0)
tsk	.req	r23		@ current thread_info

/*
 * Interrupt handling.  Preserves r17, r18, r19
 */
	.macro	intr_handler
1:	get_irqnr_and_base r0, r6, r5, lr
	beq	2f
	mov	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adr	lr, 1b
	b	asm_do_IRQ
2:
	.endm
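	@ asm_do_IRQ returns to 1b (lr is pointed back at the top of the loop),
	@ so the macro keeps iterating until get_irqnr_and_base finds no more
	@ pending interrupts and the beq falls through to 2:.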
/*
 * PRIV mode handlers
 */
	.macro	priv_entry
	sub	sp, sp, #(S_FRAME_SIZE - 4)
	stm	(r1 - r15), [sp]+
	add	r5, sp, #S_R15
	stm	(r16 - r28), [r5]+
	ldm	(r1 - r3), [r0]+
	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""      ""       ""
	add	r0, sp, #(S_FRAME_SIZE - 4)
	stw.w	r1, [sp+], #-4		@ save the "real" r0 copied
					@ from the exception stack
	mov	r1, lr
	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r0 - sp_priv
	@  r1 - lr_priv
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - bsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stm	(r0 - r4), [r5]+
	.endm

/*
 * User mode handlers
 *
 */
	.macro	user_entry
	sub	sp, sp, #S_FRAME_SIZE
	stm	(r1 - r15), [sp+]
	add	r4, sp, #S_R16
	stm	(r16 - r28), [r4]+
	ldm	(r1 - r3), [r0]+
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""     ""        ""
	stw	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack
	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - bsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_user and lr_user
	@
	stm	(r2 - r4), [r0]+
	stur	(sp, lr), [r0-]
	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0
	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
	.endm

	.text

@
@ __invalid - generic code for failed exception
@		(re-entrant version of handlers)
@
__invalid:
	sub	sp, sp, #S_FRAME_SIZE
	stm	(r1 - r15), [sp+]
	add	r1, sp, #S_R16
	stm	(r16 - r28, sp, lr), [r1]+
	zero_fp
	ldm	(r4 - r6), [r0]+
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""  ""     ""        ""
	stw	r4, [sp]		@ save preserved r0
	stm	(r5 - r7), [r0]+	@ lr_<exception>,
					@ asr_<exception>, "old_r0"
	mov	r0, sp
	mov	r1, asr
	b	bad_mode
ENDPROC(__invalid)

	.align	5
__dabt_priv:
	priv_entry
	@
	@ get ready to re-enable interrupts if appropriate
	@
	mov	r17, asr
	cand.a	r3, #PSR_I_BIT
	bne	1f
	andn	r17, r17, #PSR_I_BIT
1:
	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context asr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
	movc	r1, p0.c3, #0		@ get FSR
	movc	r0, p0.c4, #0		@ get FAR
	@
	@ set desired INTR state, then call main handler
	@
	mov.a	asr, r17
	mov	r2, sp
	b.l	do_DataAbort
	@
	@ INTRs off again before pulling preserved data off the stack
	@
	disable_irq r0
	@
	@ restore BSR and restart the instruction
	@
	ldw	r2, [sp+], #S_PSR
	priv_exit r2			@ return from exception
ENDPROC(__dabt_priv)

	.align	5
__intr_priv:
	priv_entry
	intr_handler
	mov	r0, #0			@ epip4d
	movc	p0.c5, r0, #14
	nop; nop; nop; nop; nop; nop; nop; nop
	ldw	r4, [sp+], #S_PSR	@ irqs are already disabled
	priv_exit r4			@ return from exception
ENDPROC(__intr_priv)

	.ltorg

	.align	5
__extn_priv:
	priv_entry
	mov	r0, sp			@ struct pt_regs *regs
	mov	r1, asr
	b	bad_mode		@ not supported
ENDPROC(__extn_priv)

	.align	5
__pabt_priv:
	priv_entry
	@
	@ re-enable interrupts if appropriate
	@
	mov	r17, asr
	cand.a	r3, #PSR_I_BIT
	bne	1f
	andn	r17, r17, #PSR_I_BIT
1:
	@
	@ set args, then call main handler
	@
	@  r0 - address of faulting instruction
	@  r1 - pointer to registers on stack
	@
	mov	r0, r2			@ pass address of aborted instruction
	mov	r1, #5
	mov.a	asr, r17
	mov	r2, sp			@ regs
	b.l	do_PrefetchAbort	@ call abort handler
	@
	@ INTRs off again before pulling preserved data off the stack
	@
	disable_irq r0
	@
	@ restore BSR and restart the instruction
	@
	ldw	r2, [sp+], #S_PSR
	priv_exit r2			@ return from exception
ENDPROC(__pabt_priv)

	.align	5
.LCcralign:
	.word	cr_alignment

	.align	5
__dabt_user:
	user_entry
#ifdef CONFIG_UNICORE_FPU_F64
	cff	ip, s31
	cand.a	ip, #0x08000000		@ FPU exception traps?
	beq	209f
	ldw	ip, [sp+], #S_PC
	add	ip, ip, #4
	stw	ip, [sp+], #S_PC
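	@ The saved PC is bumped past the trapping FPU instruction here, so
	@ the exception return does not simply re-take the same trap once the
	@ instruction has been handled or emulated below.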
	@
	@ fall through to the emulation code, which returns using r19 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real extended instruction
	@
	@  r0 - instruction
	@
1:	ldw.u	r0, [r2]
	adr	r19, ret_from_exception
	adr	lr, 209f
	@
	@ fallthrough to call do_uc_f64
	@
/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC
 *  r19 = normal "successful" return address
 *  r20 = this thread's thread_info structure.
 *  lr  = unrecognised instruction return address
 */
	get_thread_info r20			@ get current thread
	and	r8, r0, #0x00003c00		@ mask out CP number
	mov	r7, #1
	stb	r7, [r20+], #TI_USED_CP + 2	@ set appropriate used_cp[]
	@ F64 hardware support entry point.
	@  r0  = faulted instruction
	@  r19 = return address
	@  r20 = fp_state
	enable_irq r4
	add	r20, r20, #TI_FPSTATE		@ r20 = workspace
	cff	r1, s31				@ get fpu FPSCR
	andn	r2, r1, #0x08000000
	ctf	r2, s31				@ clear 27 bit
	mov	r2, sp				@ nothing stacked - regdump is at TOS
	mov	lr, r19				@ setup for a return to the user code
	@ Now call the C code to package up the bounce to the support code
	@   r0 holds the trigger instruction
	@   r1 holds the FPSCR value
	@   r2 pointer to register dump
	b	ucf64_exchandler
209:
#endif
	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context asr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
	movc	r1, p0.c3, #0		@ get FSR
	movc	r0, p0.c4, #0		@ get FAR
	@
	@ INTRs on, then call the main handler
	@
	enable_irq r2
	mov	r2, sp
	adr	lr, ret_from_exception
	b	do_DataAbort
ENDPROC(__dabt_user)

	.align	5
__intr_user:
	user_entry
	get_thread_info tsk
	intr_handler
	mov	why, #0
	b	ret_to_user
ENDPROC(__intr_user)

	.ltorg

	.align	5
__extn_user:
	user_entry
	mov	r0, sp
	mov	r1, asr
	b	bad_mode
ENDPROC(__extn_user)

	.align	5
__pabt_user:
	user_entry
	mov	r0, r2			@ pass address of aborted instruction.
	mov	r1, #5
	enable_irq r1			@ Enable interrupts
	mov	r2, sp			@ regs
	b.l	do_PrefetchAbort	@ call abort handler
	/* fall through */

/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
ENDPROC(__pabt_user)
ENDPROC(ret_from_exception)

/*
 * Register switch for UniCore V2 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE
	stm.w	(r4 - r15), [ip]+
	stm.w	(r16 - r27, sp, lr), [ip]+
#ifdef CONFIG_UNICORE_FPU_F64
	add	ip, r1, #TI_FPSTATE
	sfm.w	(f0  - f7 ), [ip]+
	sfm.w	(f8  - f15), [ip]+
	sfm.w	(f16 - f23), [ip]+
	sfm.w	(f24 - f31), [ip]+
	cff	r4, s31
	stw	r4, [ip]
	add	ip, r2, #TI_FPSTATE
	lfm.w	(f0  - f7 ), [ip]+
	lfm.w	(f8  - f15), [ip]+
	lfm.w	(f16 - f23), [ip]+
	lfm.w	(f24 - f31), [ip]+
	ldw	r4, [ip]
	ctf	r4, s31
#endif
	add	ip, r2, #TI_CPU_SAVE
	ldm.w	(r4 - r15), [ip]+
	ldm	(r16 - r27, sp, pc), [ip]+	@ Load all regs saved previously
ENDPROC(__switch_to)

	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the PRIV
 * stack.
 */
ret_fast_syscall:
	disable_irq r1				@ disable interrupts
	ldw	r1, [tsk+], #TI_FLAGS
	cand.a	r1, #_TIF_WORK_MASK
	bne	fast_work_pending
	@ fast_restore_user_regs
	restore_user_regs fast = 1, offset = S_OFF

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	stw.w	r0, [sp+], #S_R0+S_OFF		@ returned r0
work_pending:
	cand.a	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	cand.a	r1, #_TIF_SIGPENDING		@ delivering a signal?
	cmovne	why, #0				@ prevent further restarts
	b.l	do_notify_resume
	b	ret_slow_syscall		@ Check work again
work_resched:
	b.l	schedule

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq r1				@ disable interrupts
	get_thread_info tsk			@ epip4d, one path error?!
	ldw	r1, [tsk+], #TI_FLAGS
	cand.a	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	@ slow_restore_user_regs
	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	b.l	schedule_tail
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
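	@ r4 and r5 are assumed to hold the thread function and its argument,
	@ stashed there by copy_thread(); lr is pointed at ret_slow_syscall
	@ below so the thread function "returns" into the normal exit path.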
	b.l	schedule_tail
	mov	r0, r5
	adr	lr, ret_slow_syscall
	mov	pc, r4
ENDPROC(ret_from_kernel_thread)

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */
	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stm	(r0 - r15), [sp]+		@ Calling r0 - r15
	add	r8, sp, #S_R16
	stm	(r16 - r28), [r8]+		@ Calling r16 - r28
	add	r8, sp, #S_PC
	stur	(sp, lr), [r8-]			@ Calling sp, lr
	mov	r8, bsr				@ called from non-REAL mode
	stw	lr, [sp+], #S_PC		@ Save calling PC
	stw	r8, [sp+], #S_PSR		@ Save ASR
	stw	r0, [sp+], #S_OLD_R0		@ Save OLD_R0
	zero_fp
	/*
	 * Get the system call number.
	 */
	sub	ip, lr, #4
	ldw.u	scno, [ip]			@ get SWI instruction
#ifdef CONFIG_ALIGNMENT_TRAP
	ldw	ip, __cr_alignment
	ldw	ip, [ip]
	movc	p0.c1, ip, #0			@ update control register
#endif
	enable_irq ip
	get_thread_info tsk
	ldw	tbl, =sys_call_table		@ load syscall table pointer
	andn	scno, scno, #0xff000000		@ mask off SWI op-code
	andn	scno, scno, #0x00ff0000		@ mask off SWI op-code
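	@ The two andn's above clear the top 16 bits of the SWI instruction
	@ word, which appear to carry the op-code; what remains in scno is
	@ the syscall number encoded in the instruction's low bits.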
	stm.w	(r4, r5), [sp-]			@ push fifth and sixth args
	ldw	ip, [tsk+], #TI_FLAGS		@ check for syscall tracing
	cand.a	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	bne	__sys_trace
	csub.a	scno, #__NR_syscalls		@ check upper syscall limit
	adr	lr, ret_fast_syscall		@ return address
	bea	1f
	ldw	pc, [tbl+], scno << #2		@ call sys_* routine
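	@ The ldw above loads sys_call_table[scno] (tbl + scno * 4) straight
	@ into pc; lr was set to ret_fast_syscall just before, so the sys_*
	@ routine returns there.  Numbers at or above __NR_syscalls already
	@ branched to 1: via the bea above.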
1:
	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

/*
 * This is the really slow path.  We're going to be doing
 * context switches, and waiting for our parent to respond.
 */
__sys_trace:
	mov	r2, scno
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	b.l	syscall_trace
	adr	lr, __sys_trace_return		@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	csub.a	scno, #__NR_syscalls		@ check upper syscall limit
	bea	2b
	ldm	(r0 - r3), [r1]+		@ have to reload r0 - r3
	ldw	pc, [tbl+], scno << #2		@ call sys_* routine

__sys_trace_return:
	stw.w	r0, [sp+], #S_R0 + S_OFF	@ save returned r0
	mov	r2, scno
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	b.l	syscall_trace
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

ENTRY(sys_rt_sigreturn)
	add	r0, sp, #S_OFF
	mov	why, #0				@ prevent syscall restart handling
	b	__sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn)

	__INIT

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in INTR mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode
	.align	5
vector_\name:
	@
	@ Save r0, lr_<exception> (parent PC) and bsr_<exception>
	@ (parent ASR)
	@
	stw	r0, [sp]
	stw	lr, [sp+], #4		@ save r0, lr
	mov	lr, bsr
	stw	lr, [sp+], #8		@ save bsr
	@
	@ Prepare for PRIV mode.  INTRs remain disabled.
	@
	mov	r0, asr
	xor	r0, r0, #(\mode ^ PRIV_MODE)
	mov.a	bsr, r0
	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x03
	add	lr, lr, #1
	mov	r0, sp
	ldw	lr, [pc+], lr << #2
	mov.a	pc, lr			@ branch to handler in PRIV mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
	.endm

	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	intr, INTR_MODE

	.long	__intr_user			@  0  (USER)
	.long	__invalid			@  1
	.long	__invalid			@  2
	.long	__intr_priv			@  3  (PRIV)

/*
 * Data abort dispatcher
 * Enter in ABT mode, bsr = USER ASR, lr = USER PC
 */
	vector_stub	dabt, ABRT_MODE

	.long	__dabt_user			@  0  (USER)
	.long	__invalid			@  1
	.long	__invalid			@  2  (INTR)
	.long	__dabt_priv			@  3  (PRIV)

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, bsr = USER ASR, lr = USER PC
 */
	vector_stub	pabt, ABRT_MODE

	.long	__pabt_user			@  0  (USER)
	.long	__invalid			@  1
	.long	__invalid			@  2  (INTR)
	.long	__pabt_priv			@  3  (PRIV)

/*
 * Undef instr entry dispatcher
 * Enter in EXTN mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC
 */
	vector_stub	extn, EXTN_MODE

	.long	__extn_user			@  0  (USER)
	.long	__invalid			@  1
	.long	__invalid			@  2  (INTR)
	.long	__extn_priv			@  3  (PRIV)

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5
.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
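@ stubs_offset compensates for the copy described above: the stubs are copied
@ to 0xffff0200 and the vectors below to the base of the vector page
@ (presumably 0xffff0000), so adding (__vectors_start + 0x200 - __stubs_start)
@ to a stub symbol makes each branch land on the stub's copied address rather
@ than its link-time one.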
	.globl	__vectors_start
__vectors_start:
	jepriv	SYS_ERROR0
	b	vector_extn + stubs_offset
	ldw	pc, .LCvswi + stubs_offset
	b	vector_pabt + stubs_offset
	b	vector_dabt + stubs_offset
	jepriv	SYS_ERROR0
	b	vector_intr + stubs_offset
	jepriv	SYS_ERROR0

	.globl	__vectors_end
__vectors_end:

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4