uasm-micromips.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * A small micro-assembler. It is intentionally kept simple: it supports
 * only a subset of instructions and does not try to hide pipeline
 * effects like branch delay slots.
 *
 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
 * Copyright (C) 2005, 2007 Maciej W. Rozycki
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/types.h>

#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/bugs.h>
#define UASM_ISA	_UASM_ISA_MICROMIPS
#include <asm/uasm.h>

#define RS_MASK		0x1f
#define RS_SH		16
#define RT_MASK		0x1f
#define RT_SH		21
#define SCIMM_MASK	0x3ff
#define SCIMM_SH	16

/* This macro sets the non-variable bits of an instruction. */
#define M(a, b, c, d, e, f)		\
	((a) << OP_SH			\
	 | (b) << RT_SH			\
	 | (c) << RS_SH			\
	 | (d) << RD_SH			\
	 | (e) << RE_SH			\
	 | (f) << FUNC_SH)
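
/*
 * Illustrative note (not part of the original file): a table entry such as
 * the insn_addu one below only fixes the major opcode and function fields,
 * M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op); the RT/RS/RD register fields
 * listed in the third column are filled in later by build_insn() from its
 * variadic arguments.
 */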

#include "uasm.c"
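
/*
 * Explanatory note (an assumption, not stated in this file): uasm.c holds
 * the ISA-independent part of the micro-assembler (the uasm_i_* wrappers
 * plus label and relocation handling) and is also included by the classic
 * MIPS variant, uasm-mips.c; the table and helpers here supply the
 * microMIPS encodings it builds on.
 */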

static struct insn insn_table_MM[] = {
	{ insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD },
	{ insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
	{ insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD },
	{ insn_andi, M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
	{ insn_beq, M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_beql, 0, 0 },
	{ insn_bgez, M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM },
	{ insn_bgezl, 0, 0 },
	{ insn_bltz, M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM },
	{ insn_bltzl, 0, 0 },
	{ insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM },
	{ insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM },
	{ insn_daddu, 0, 0 },
	{ insn_daddiu, 0, 0 },
	{ insn_divu, M(mm_pool32a_op, 0, 0, 0, mm_divu_op, mm_pool32axf_op), RT | RS },
	{ insn_dmfc0, 0, 0 },
	{ insn_dmtc0, 0, 0 },
	{ insn_dsll, 0, 0 },
	{ insn_dsll32, 0, 0 },
	{ insn_dsra, 0, 0 },
	{ insn_dsrl, 0, 0 },
	{ insn_dsrl32, 0, 0 },
	{ insn_drotr, 0, 0 },
	{ insn_drotr32, 0, 0 },
	{ insn_dsubu, 0, 0 },
	{ insn_eret, M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0 },
	{ insn_ins, M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE },
	{ insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE },
	{ insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM },
	{ insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM },
	{ insn_jalr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RT | RS },
	{ insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
	{ insn_lb, M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
	{ insn_ld, 0, 0 },
	{ insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
	{ insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
	{ insn_lld, 0, 0 },
	{ insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },
	{ insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
	{ insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD },
	{ insn_mfhi, M(mm_pool32a_op, 0, 0, 0, mm_mfhi32_op, mm_pool32axf_op), RS },
	{ insn_mflo, M(mm_pool32a_op, 0, 0, 0, mm_mflo32_op, mm_pool32axf_op), RS },
	{ insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD },
	{ insn_mul, M(mm_pool32a_op, 0, 0, 0, 0, mm_mul_op), RT | RS | RD },
	{ insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD },
	{ insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
	{ insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM },
	{ insn_rfe, 0, 0 },
	{ insn_sc, M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM },
	{ insn_scd, 0, 0 },
	{ insn_sd, 0, 0 },
	{ insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
	{ insn_sllv, M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD },
	{ insn_slt, M(mm_pool32a_op, 0, 0, 0, 0, mm_slt_op), RT | RS | RD },
	{ insn_sltiu, M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
	{ insn_sltu, M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD },
	{ insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
	{ insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD },
	{ insn_srlv, M(mm_pool32a_op, 0, 0, 0, 0, mm_srlv32_op), RT | RS | RD },
	{ insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD },
	{ insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD },
	{ insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
	{ insn_sync, M(mm_pool32a_op, 0, 0, 0, mm_sync_op, mm_pool32axf_op), RS },
	{ insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 },
	{ insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 },
	{ insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 },
	{ insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 },
	{ insn_wait, M(mm_pool32a_op, 0, 0, 0, mm_wait_op, mm_pool32axf_op), SCIMM },
	{ insn_wsbh, M(mm_pool32a_op, 0, 0, 0, mm_wsbh_op, mm_pool32axf_op), RT | RS },
	{ insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD },
	{ insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
	{ insn_dins, 0, 0 },
	{ insn_dinsm, 0, 0 },
	{ insn_syscall, M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM },
	{ insn_bbit0, 0, 0 },
	{ insn_bbit1, 0, 0 },
	{ insn_lwx, 0, 0 },
	{ insn_ldx, 0, 0 },
	{ insn_invalid, 0, 0 }
};

#undef M

static inline u32 build_bimm(s32 arg)
{
	WARN(arg > 0xffff || arg < -0x10000,
	     KERN_WARNING "Micro-assembler field overflow\n");

	WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");

	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
}
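
/*
 * Worked example (illustrative, not part of the original file): a backward
 * branch of -8 bytes yields build_bimm(-8) == 0xfffc, i.e. the sign bit
 * plus the byte offset divided by two, since microMIPS branch offsets are
 * counted in 16-bit halfwords.
 */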

static inline u32 build_jimm(u32 arg)
{
	WARN(arg & ~((JIMM_MASK << 2) | 1),
	     KERN_WARNING "Micro-assembler field overflow\n");

	return (arg >> 1) & JIMM_MASK;
}

/*
 * The order of opcode arguments is implicitly left to right,
 * starting with RS and ending with FUNC or IMM.
 */
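/*
 * Illustrative note (not original text): for an entry whose fields are
 * RT | RS | RD, the variadic arguments are consumed in the order RS, RT,
 * RD by the checks below, regardless of the order the flags appear in the
 * table entry; insn_mfc0 and insn_mtc0 additionally swap which hardware
 * field receives the RS and RT arguments.
 */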
static void build_insn(u32 **buf, enum opcode opc, ...)
{
	struct insn *ip = NULL;
	unsigned int i;
	va_list ap;
	u32 op;

	for (i = 0; insn_table_MM[i].opcode != insn_invalid; i++)
		if (insn_table_MM[i].opcode == opc) {
			ip = &insn_table_MM[i];
			break;
		}

	if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
		panic("Unsupported Micro-assembler instruction %d", opc);

	op = ip->match;

	va_start(ap, opc);
	if (ip->fields & RS) {
		if (opc == insn_mfc0 || opc == insn_mtc0)
			op |= build_rt(va_arg(ap, u32));
		else
			op |= build_rs(va_arg(ap, u32));
	}
	if (ip->fields & RT) {
		if (opc == insn_mfc0 || opc == insn_mtc0)
			op |= build_rs(va_arg(ap, u32));
		else
			op |= build_rt(va_arg(ap, u32));
	}
	if (ip->fields & RD)
		op |= build_rd(va_arg(ap, u32));
	if (ip->fields & RE)
		op |= build_re(va_arg(ap, u32));
	if (ip->fields & SIMM)
		op |= build_simm(va_arg(ap, s32));
	if (ip->fields & UIMM)
		op |= build_uimm(va_arg(ap, u32));
	if (ip->fields & BIMM)
		op |= build_bimm(va_arg(ap, s32));
	if (ip->fields & JIMM)
		op |= build_jimm(va_arg(ap, u32));
	if (ip->fields & FUNC)
		op |= build_func(va_arg(ap, u32));
	if (ip->fields & SET)
		op |= build_set(va_arg(ap, u32));
	if (ip->fields & SCIMM)
		op |= build_scimm(va_arg(ap, u32));
	va_end(ap);
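
	/*
	 * Explanatory note (an assumption, not stated in the original file):
	 * microMIPS stores 32-bit instructions as two 16-bit halfwords with
	 * the most significant halfword at the lower address, so when the
	 * whole word is written with a single 32-bit store on a little-endian
	 * kernel the two halfwords must be swapped.
	 */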
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	**buf = ((op & 0xffff) << 16) | (op >> 16);
#else
	**buf = op;
#endif
	(*buf)++;
}

static inline void
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
	long laddr = (long)lab->addr;
	long raddr = (long)rel->addr;

	switch (rel->type) {
	case R_MIPS_PC16:
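		/*
		 * Explanatory note (not in the original file): because
		 * build_insn() swapped the halfwords on a little-endian
		 * kernel, the 16-bit branch immediate sits in the upper
		 * half of the stored word, hence the << 16 below.
		 */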
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		*rel->addr |= (build_bimm(laddr - (raddr + 4)) << 16);
#else
		*rel->addr |= build_bimm(laddr - (raddr + 4));
#endif
		break;

	default:
		panic("Unsupported Micro-assembler relocation %d",
		      rel->type);
	}
}