asmmacro.h

#ifndef _ASM_IA64_ASMMACRO_H
#define _ASM_IA64_ASMMACRO_H

/*
 * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#define ENTRY(name) \
	.align 32; \
	.proc name; \
name:

#define ENTRY_MIN_ALIGN(name) \
	.align 16; \
	.proc name; \
name:

#define GLOBAL_ENTRY(name) \
	.global name; \
	ENTRY(name)

#define END(name) \
	.endp name
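
/*
 * Usage sketch (not part of the original header; "my_func" is a
 * hypothetical name): a global assembly function is bracketed with
 * these macros, which emit the .proc/.endp pair the unwinder needs:
 *
 *	GLOBAL_ENTRY(my_func)
 *		...function body...
 *		br.ret.sptk.many rp
 *	END(my_func)
 */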

/*
 * Helper macros to make unwind directives more readable:
 */

/* prologue_gr: */
#define ASM_UNW_PRLG_RP			0x8
#define ASM_UNW_PRLG_PFS		0x4
#define ASM_UNW_PRLG_PSP		0x2
#define ASM_UNW_PRLG_PR			0x1
#define ASM_UNW_PRLG_GRSAVE(ninputs)	(32+(ninputs))
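
/*
 * Usage sketch (an assumption about typical call sites, not taken from
 * this file): the masks are OR'ed together as the first operand of a
 * .prologue directive, and GRSAVE names the first stacked register after
 * the inputs as the save location; here a function with two inputs marks
 * rp and ar.pfs as saved:
 *
 *	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
 */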

/*
 * Helper macros for accessing user memory.
 *
 * When adding any new .section/.previous entries here, make sure to
 * also add it to the DISCARD section in arch/ia64/kernel/gate.lds.S or
 * unpleasant things will happen.
 */

	.section "__ex_table", "a"	// declare section & section attributes
	.previous

# define EX(y,x...) \
	.xdata4 "__ex_table", 99f-., y-.; \
  [99:]	x
# define EXCLR(y,x...) \
	.xdata4 "__ex_table", 99f-., y-.+4; \
  [99:]	x
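
/*
 * Usage sketch (hypothetical label and registers): tag a user access so
 * that a fault at the marked instruction resumes at the given label rather
 * than oopsing; the EXCLR variant (note the +4 encoded into the fixup
 * offset) additionally asks the fixup handler to zero the value register.
 *
 *	EX(.Lfail, ld8 r8=[r32])	// r32 holds a user pointer
 *	...
 * .Lfail:
 *	// error path
 */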

/*
 * Tag MCA recoverable instruction ranges.
 */

	.section "__mca_table", "a"	// declare section & section attributes
	.previous

# define MCA_RECOVER_RANGE(y) \
	.xdata4 "__mca_table", y-., 99f-.; \
  [99:]
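
/*
 * Usage sketch (hypothetical; semantics inferred from the macro itself):
 * the table entry spans from the label passed in up to the point of
 * invocation, so the macro follows the instructions it tags:
 *
 *	1:	ld8 r18=[r17]		// load that may take a recoverable MCA
 *		MCA_RECOVER_RANGE(1b)
 */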

/*
 * Mark instructions that need a load of a virtual address patched to be
 * a load of a physical address.  We use this either in critical performance
 * path (ivt.S - TLB miss processing) or in places where it might not be
 * safe to use a "tpa" instruction (mca_asm.S - error recovery).
 */

	.section ".data..patch.vtop", "a"	// declare section & section attributes
	.previous

#define LOAD_PHYSICAL(pr, reg, obj) \
[1:](pr)movl reg = obj; \
	.xdata4 ".data..patch.vtop", 1b-.
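
/*
 * Usage sketch (hypothetical register/symbol choice): load the address of
 * a kernel object under a predicate, recording the bundle so boot-time
 * patching can turn the virtual-address load into a physical one:
 *
 *	LOAD_PHYSICAL(p0, r19, swapper_pg_dir)	// p0 = always-true predicate
 */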

/*
 * For now, we always put in the McKinley E9 workaround.  On CPUs that don't need it,
 * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
 */
#define DO_MCKINLEY_E9_WORKAROUND

#ifdef DO_MCKINLEY_E9_WORKAROUND
	.section ".data..patch.mckinley_e9", "a"
	.previous
/* workaround for Itanium 2 Errata 9: */
# define FSYS_RETURN \
	.xdata4 ".data..patch.mckinley_e9", 1f-.; \
1:{ .mib; \
	nop.m 0; \
	mov r16=ar.pfs; \
	br.call.sptk.many b7=2f;; \
}; \
2:{ .mib; \
	nop.m 0; \
	mov ar.pfs=r16; \
	br.ret.sptk.many b6;; \
}
#else
# define FSYS_RETURN	br.ret.sptk.many b6
#endif
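
/*
 * Usage sketch (assumption about call sites; "fsys_example" is a
 * hypothetical stub name): a fast system-call path returns to user mode
 * through this macro rather than a bare "br.ret.sptk.many b6", so the
 * workaround bundles can be patched to NOPs on CPUs that don't need them:
 *
 *	ENTRY(fsys_example)
 *		...
 *		FSYS_RETURN
 *	END(fsys_example)
 */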

/*
 * If physical stack register size is different from DEF_NUM_STACK_REG,
 * dynamically patch the kernel for correct size.
 */

	.section ".data..patch.phys_stack_reg", "a"
	.previous

#define LOAD_PHYS_STACK_REG_SIZE(reg) \
[1:]	adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0; \
	.xdata4 ".data..patch.phys_stack_reg", 1b-.
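
/*
 * Usage sketch (hypothetical register): materialize a constant derived
 * from the number of physical stacked registers; the default immediate
 * encoded here is patched at boot when the CPU's register file differs.
 *
 *	LOAD_PHYS_STACK_REG_SIZE(r17)
 */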

/*
 * Up until early 2004, use of .align within a function caused bad unwind info.
 * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
 * otherwise.
 */
#ifdef HAVE_WORKING_TEXT_ALIGN
# define TEXT_ALIGN(n)	.align n
#else
# define TEXT_ALIGN(n)
#endif

#ifdef HAVE_SERIALIZE_DIRECTIVE
# define dv_serialize_data		.serialize.data
# define dv_serialize_instruction	.serialize.instruction
#else
# define dv_serialize_data
# define dv_serialize_instruction
#endif
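
/*
 * Usage sketch (an assumption about intent): on assemblers that support
 * the directive, dv_serialize_data informs the dependency-violation
 * checker that a data serialization has occurred at this point,
 * suppressing spurious hazard warnings:
 *
 *	mov cr.ifa=r16
 *	;;
 *	dv_serialize_data		// hazard handled; silence DV checker
 */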

#endif /* _ASM_IA64_ASMMACRO_H */