/* helpers.S: assembler helper routines. */
        .align  32
        .globl  __flushw_user
        .type   __flushw_user,#function
__flushw_user:
        rdpr    %otherwin, %g1          /* Any user windows still in the CPU? */
        brz,pn  %g1, 2f
         clr    %g2
1:      save    %sp, -128, %sp          /* Each save forces one user window to spill. */
        rdpr    %otherwin, %g1
        brnz,pt %g1, 1b
         add    %g2, 1, %g2             /* Count how far down we walked. */
1:      sub     %g2, 1, %g2
        brnz,pt %g2, 1b
         restore %g0, %g0, %g0          /* Unwind back to the window we started in. */
2:      retl
         nop
        .size   __flushw_user,.-__flushw_user
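
        /* A hedged sketch of how C code is assumed to reach the routine
         * above (the extern prototype is an assumption for illustration,
         * not a definitive interface):
         *
         *      extern void __flushw_user(void);
         *
         *      // Spill any user register windows still held in the CPU
         *      // out to the user stack before the kernel inspects it.
         *      __flushw_user();
         */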

        /* Flush %fp and %i7 to the stack for all register
         * windows active inside of the cpu.  This allows
         * show_stack_trace() to avoid using an expensive
         * 'flushw'.
         */
        .globl          stack_trace_flush
        .type           stack_trace_flush,#function
stack_trace_flush:
        rdpr            %pstate, %o0
        wrpr            %o0, PSTATE_IE, %pstate /* Interrupts off while %cwp is rotated. */

        rdpr            %cwp, %g1               /* %g1 = current window. */
        rdpr            %canrestore, %g2        /* %g2 = windows we may rotate back into. */
        sub             %g1, 1, %g3

1:      brz,pn          %g2, 2f
         sub            %g2, 1, %g2
        wrpr            %g3, %cwp               /* Rotate into the older window... */
        stx             %fp, [%sp + STACK_BIAS + RW_V9_I6]
        stx             %i7, [%sp + STACK_BIAS + RW_V9_I7]
        ba,pt           %xcc, 1b                /* ...and store its %fp/%i7 to its frame. */
         sub            %g3, 1, %g3

2:      wrpr            %g1, %cwp               /* Back to the original window. */
        wrpr            %o0, %pstate            /* Restore the saved interrupt state. */

        retl
         nop
        .size           stack_trace_flush,.-stack_trace_flush
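
        /* A hedged C-level sketch of the window walk above, only to make
         * the control flow easier to follow; the helper names are invented
         * for illustration, while the register roles and offsets come from
         * the code itself:
         *
         *      cwp  = rdpr_cwp();               // %g1
         *      left = rdpr_canrestore();        // %g2
         *      for (win = cwp - 1; left != 0; left--, win--) {  // %g3
         *              wrpr_cwp(win);
         *              // %sp, %fp and %i7 now belong to that older window.
         *              stx(fp, sp + STACK_BIAS + RW_V9_I6);
         *              stx(i7, sp + STACK_BIAS + RW_V9_I7);
         *      }
         *      wrpr_cwp(cwp);                   // back where we started
         */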

#ifdef CONFIG_SMP
        .globl          hard_smp_processor_id
        .type           hard_smp_processor_id,#function
hard_smp_processor_id:
#endif
        .globl          real_hard_smp_processor_id
        .type           real_hard_smp_processor_id,#function
real_hard_smp_processor_id:
        __GET_CPUID(%o0)
        retl
         nop
#ifdef CONFIG_SMP
        .size           hard_smp_processor_id,.-hard_smp_processor_id
#endif
        .size           real_hard_smp_processor_id,.-real_hard_smp_processor_id
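
        /* A hedged sketch of the C-side view of the two entry points above
         * (the prototypes are assumptions; the real declarations live in
         * the arch headers):
         *
         *      extern int hard_smp_processor_id(void);
         *      extern int real_hard_smp_processor_id(void);
         *
         * On CONFIG_SMP kernels hard_smp_processor_id simply falls through
         * into real_hard_smp_processor_id, so both return the physical cpu
         * id that __GET_CPUID() leaves in %o0.
         */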