/* arch/x86/lib/rwsem.S */
/*
 * x86 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Portions Copyright 1999 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
 */
  15. #include <linux/linkage.h>
  16. #include <asm/alternative-asm.h>
  17. #define __ASM_HALF_REG(reg) __ASM_SEL(reg, e##reg)
  18. #define __ASM_HALF_SIZE(inst) __ASM_SEL(inst##w, inst##l)
  19. #ifdef CONFIG_X86_32
  20. /*
  21. * The semaphore operations have a special calling sequence that
  22. * allow us to do a simpler in-line version of them. These routines
  23. * need to convert that sequence back into the C sequence when
  24. * there is contention on the semaphore.
  25. *
  26. * %eax contains the semaphore pointer on entry. Save the C-clobbered
  27. * registers (%eax, %edx and %ecx) except %eax whish is either a return
  28. * value or just clobbered..
  29. */
  30. #define save_common_regs \
  31. pushl %ecx
  32. #define restore_common_regs \
  33. popl %ecx
  34. /* Avoid uglifying the argument copying x86-64 needs to do. */
  35. .macro movq src, dst
  36. .endm
  37. #else
  38. /*
  39. * x86-64 rwsem wrappers
  40. *
  41. * This interfaces the inline asm code to the slow-path
  42. * C routines. We need to save the call-clobbered regs
  43. * that the asm does not mark as clobbered, and move the
  44. * argument from %rax to %rdi.
  45. *
  46. * NOTE! We don't need to save %rax, because the functions
  47. * will always return the semaphore pointer in %rax (which
  48. * is also the input argument to these helpers)
  49. *
  50. * The following can clobber %rdx because the asm clobbers it:
  51. * call_rwsem_down_write_failed
  52. * call_rwsem_wake
  53. * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
  54. */
  55. #define save_common_regs \
  56. pushq %rdi; \
  57. pushq %rsi; \
  58. pushq %rcx; \
  59. pushq %r8; \
  60. pushq %r9; \
  61. pushq %r10; \
  62. pushq %r11
  63. #define restore_common_regs \
  64. popq %r11; \
  65. popq %r10; \
  66. popq %r9; \
  67. popq %r8; \
  68. popq %rcx; \
  69. popq %rsi; \
  70. popq %rdi
  71. #endif
  72. /* Fix up special calling conventions */
  73. ENTRY(call_rwsem_down_read_failed)
  74. save_common_regs
  75. __ASM_SIZE(push,) %__ASM_REG(dx)
  76. movq %rax,%rdi
  77. call rwsem_down_read_failed
  78. __ASM_SIZE(pop,) %__ASM_REG(dx)
  79. restore_common_regs
  80. ret
  81. ENDPROC(call_rwsem_down_read_failed)
  82. ENTRY(call_rwsem_down_write_failed)
  83. save_common_regs
  84. movq %rax,%rdi
  85. call rwsem_down_write_failed
  86. restore_common_regs
  87. ret
  88. ENDPROC(call_rwsem_down_write_failed)
  89. ENTRY(call_rwsem_wake)
  90. /* do nothing if still outstanding active readers */
  91. __ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
  92. jnz 1f
  93. save_common_regs
  94. movq %rax,%rdi
  95. call rwsem_wake
  96. restore_common_regs
  97. 1: ret
  98. ENDPROC(call_rwsem_wake)
  99. ENTRY(call_rwsem_downgrade_wake)
  100. save_common_regs
  101. __ASM_SIZE(push,) %__ASM_REG(dx)
  102. movq %rax,%rdi
  103. call rwsem_downgrade_wake
  104. __ASM_SIZE(pop,) %__ASM_REG(dx)
  105. restore_common_regs
  106. ret
  107. ENDPROC(call_rwsem_downgrade_wake)