bpf_jit.c

/*
 * Just-In-Time compiler for BPF filters on MIPS
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/kconfig.h>
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/asm.h>
#include <asm/bitops.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/uasm.h>

#include "bpf_jit.h"

/* ABI
 * r_skb_hl	SKB header length
 * r_data	SKB data pointer
 * r_off	Offset
 * r_A		BPF register A
 * r_X		BPF register X
 * r_skb	*skb
 * r_M		*scratch memory
 * r_skb_len	SKB length
 *
 * On entry (*bpf_func)(*skb, *filter)
 * a0 = MIPS_R_A0 = skb;
 * a1 = MIPS_R_A1 = filter;
 *
 * Stack
 * ...
 * M[15]
 * M[14]
 * M[13]
 * ...
 * M[0] <-- r_M
 * saved reg k-1
 * saved reg k-2
 * ...
 * saved reg 0 <-- r_sp
 * <no argument area>
 *
 *                     Packet layout
 *
 * <--------------------- len ------------------------>
 * <--skb-len(r_skb_hl)--><----- skb->data_len ------->
 * ----------------------------------------------------
 * |                  skb->data                        |
 * ----------------------------------------------------
 */
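
/*
 * Illustrative only: once the JIT has run, the generic filter code
 * invokes the compiled image through fp->bpf_func, i.e. roughly
 *
 *	unsigned int res = fp->bpf_func(skb, fp->insns);
 *
 * which is what places the skb in a0 and the filter in a1 as described
 * above (names are from the generic classic-BPF path, not this file).
 */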

#define ptr typeof(unsigned long)

#define SCRATCH_OFF(k)		(4 * (k))

/* JIT flags */
#define SEEN_CALL		(1 << BPF_MEMWORDS)
#define SEEN_SREG_SFT		(BPF_MEMWORDS + 1)
#define SEEN_SREG_BASE		(1 << SEEN_SREG_SFT)
#define SEEN_SREG(x)		(SEEN_SREG_BASE << (x))
#define SEEN_OFF		SEEN_SREG(2)
#define SEEN_A			SEEN_SREG(3)
#define SEEN_X			SEEN_SREG(4)
#define SEEN_SKB		SEEN_SREG(5)
#define SEEN_MEM		SEEN_SREG(6)
/* SEEN_SKB_DATA also implies skb_hl and skb_len */
#define SEEN_SKB_DATA		(SEEN_SREG(7) | SEEN_SREG(1) | SEEN_SREG(0))

/* Arguments used by JIT */
#define ARGS_USED_BY_JIT	2 /* only applicable to 64-bit */

#define SBIT(x)			(1 << (x)) /* Signed version of BIT() */
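
/*
 * Flag layout, by example: the low BPF_MEMWORDS (16) bits are reserved
 * for per-scratch-word tracking, bit 16 is SEEN_CALL, and the bits from
 * SEEN_SREG_SFT (17) upwards form a bitmap of the callee-saved s0..s7
 * registers the JIT allocates (the actual register assignments live in
 * bpf_jit.h).  E.g. a filter using only register A sets SEEN_A ==
 * SEEN_SREG(3), so (ctx->flags >> SEEN_SREG_SFT) == 0x8 and s3 gets
 * saved/restored around the filter body.
 */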

/**
 * struct jit_ctx - JIT context
 * @skf:		The sk_filter
 * @prologue_bytes:	Number of bytes for prologue
 * @idx:		Instruction index
 * @flags:		JIT flags
 * @offsets:		Instruction offsets
 * @target:		Memory location for the compiled filter
 */
struct jit_ctx {
	const struct bpf_prog *skf;
	unsigned int prologue_bytes;
	u32 idx;
	u32 flags;
	u32 *offsets;
	u32 *target;
};

static inline int optimize_div(u32 *k)
{
	/* power of 2 divides can be implemented with right shift */
	if (!(*k & (*k - 1))) {
		*k = ilog2(*k);
		return 1;
	}

	return 0;
}
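
/*
 * Example: for "A /= 8", optimize_div() rewrites k = 8 into k = ilog2(8)
 * = 3 and the BPF_DIV case below emits "srl r_A, r_A, 3" instead of a
 * divu/mflo pair.  k == 0 would also pass the power-of-two test, but a
 * divide by zero never reaches the JIT; the classic BPF checker rejects
 * such filters at attach time.
 */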

static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);

/* Simply emit the instruction if the JIT memory space has been allocated */
#define emit_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		uasm_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)

/*
 * Similar to emit_instr but it must be used when we need to emit
 * 32-bit or 64-bit instructions
 */
#define emit_long_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		UASM_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)

/* Determine if immediate is within the 16-bit signed range */
static inline bool is_range16(s32 imm)
{
	return !(imm >= SBIT(15) || imm < -SBIT(15));
}

static inline void emit_addu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, addu, dst, src1, src2);
}

static inline void emit_nop(struct jit_ctx *ctx)
{
	emit_instr(ctx, nop);
}

/* Load a u32 immediate to a register */
static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		/* addiu can only handle s16 */
		if (!is_range16(imm)) {
			u32 *p = &ctx->target[ctx->idx];
			uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
			p = &ctx->target[ctx->idx + 1];
			uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
		} else {
			u32 *p = &ctx->target[ctx->idx];
			uasm_i_addiu(&p, dst, r_zero, imm);
		}
	}
	ctx->idx++;

	if (!is_range16(imm))
		ctx->idx++;
}
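
/*
 * Example: emit_load_imm(r_A, 0x12345678, ctx) does not fit in a signed
 * 16-bit immediate, so it expands to two instructions:
 *
 *	lui	r_tmp_imm, 0x1234
 *	ori	r_A, r_tmp_imm, 0x5678
 *
 * whereas emit_load_imm(r_A, -4, ctx) fits and becomes the single
 * instruction "addiu r_A, zero, -4".
 */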

static inline void emit_or(unsigned int dst, unsigned int src1,
			   unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, or, dst, src1, src2);
}

static inline void emit_ori(unsigned int dst, unsigned src, u32 imm,
			    struct jit_ctx *ctx)
{
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_or(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, ori, dst, src, imm);
	}
}

static inline void emit_daddiu(unsigned int dst, unsigned int src,
			       int imm, struct jit_ctx *ctx)
{
	/*
	 * Only used for stack, so the imm is relatively small
	 * and it fits in 15-bits
	 */
	emit_instr(ctx, daddiu, dst, src, imm);
}

static inline void emit_addiu(unsigned int dst, unsigned int src,
			      u32 imm, struct jit_ctx *ctx)
{
	if (!is_range16(imm)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_addu(dst, r_tmp, src, ctx);
	} else {
		emit_instr(ctx, addiu, dst, src, imm);
	}
}

static inline void emit_and(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, and, dst, src1, src2);
}

static inline void emit_andi(unsigned int dst, unsigned int src,
			     u32 imm, struct jit_ctx *ctx)
{
	/* If imm does not fit in u16 then load it to register */
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_and(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, andi, dst, src, imm);
	}
}

static inline void emit_xor(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, xor, dst, src1, src2);
}

static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx)
{
	/* If imm does not fit in u16 then load it to register */
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_xor(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, xori, dst, src, imm);
	}
}

static inline void emit_stack_offset(int offset, struct jit_ctx *ctx)
{
	emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset);
}

static inline void emit_subu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, subu, dst, src1, src2);
}

static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx)
{
	emit_subu(reg, r_zero, reg, ctx);
}

static inline void emit_sllv(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, sllv, dst, src, sa);
}

static inline void emit_sll(unsigned int dst, unsigned int src,
			    unsigned int sa, struct jit_ctx *ctx)
{
	/* sa is 5-bits long */
	if (sa >= BIT(5))
		/* Shifting >= 32 results in zero */
		emit_jit_reg_move(dst, r_zero, ctx);
	else
		emit_instr(ctx, sll, dst, src, sa);
}

static inline void emit_srlv(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, srlv, dst, src, sa);
}

static inline void emit_srl(unsigned int dst, unsigned int src,
			    unsigned int sa, struct jit_ctx *ctx)
{
	/* sa is 5-bits long */
	if (sa >= BIT(5))
		/* Shifting >= 32 results in zero */
		emit_jit_reg_move(dst, r_zero, ctx);
	else
		emit_instr(ctx, srl, dst, src, sa);
}

static inline void emit_slt(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, slt, dst, src1, src2);
}

static inline void emit_sltu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, sltu, dst, src1, src2);
}

static inline void emit_sltiu(unsigned dst, unsigned int src,
			      unsigned int imm, struct jit_ctx *ctx)
{
	/* 16 bit immediate */
	if (!is_range16((s32)imm)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_sltu(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, sltiu, dst, src, imm);
	}
}

/* Store register on the stack */
static inline void emit_store_stack_reg(ptr reg, ptr base,
					unsigned int offset,
					struct jit_ctx *ctx)
{
	emit_long_instr(ctx, SW, reg, offset, base);
}

static inline void emit_store(ptr reg, ptr base, unsigned int offset,
			      struct jit_ctx *ctx)
{
	emit_instr(ctx, sw, reg, offset, base);
}

static inline void emit_load_stack_reg(ptr reg, ptr base,
				       unsigned int offset,
				       struct jit_ctx *ctx)
{
	emit_long_instr(ctx, LW, reg, offset, base);
}

static inline void emit_load(unsigned int reg, unsigned int base,
			     unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lw, reg, offset, base);
}

static inline void emit_load_byte(unsigned int reg, unsigned int base,
				  unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lb, reg, offset, base);
}

static inline void emit_half_load(unsigned int reg, unsigned int base,
				  unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lh, reg, offset, base);
}

static inline void emit_mul(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, mul, dst, src1, src2);
}

static inline void emit_div(unsigned int dst, unsigned int src,
			    struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];
		uasm_i_divu(&p, dst, src);
		p = &ctx->target[ctx->idx + 1];
		uasm_i_mflo(&p, dst);
	}

	ctx->idx += 2; /* 2 insts */
}
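
/*
 * divu deposits the quotient in the LO register and the remainder in HI,
 * which is why emit_div() pairs it with mflo and emit_mod() below pairs
 * it with mfhi.  E.g. with A = 7 and X = 2, "divu" leaves LO = 3 and
 * HI = 1, so mflo implements A /= X and mfhi implements A %= X.
 */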

static inline void emit_mod(unsigned int dst, unsigned int src,
			    struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];
		uasm_i_divu(&p, dst, src);
		p = &ctx->target[ctx->idx + 1];
		uasm_i_mfhi(&p, dst);
	}

	ctx->idx += 2; /* 2 insts */
}

static inline void emit_dsll(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, dsll, dst, src, sa);
}

static inline void emit_dsrl32(unsigned int dst, unsigned int src,
			       unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, dsrl32, dst, src, sa);
}

static inline void emit_wsbh(unsigned int dst, unsigned int src,
			     struct jit_ctx *ctx)
{
	emit_instr(ctx, wsbh, dst, src);
}

/* load pointer to register */
static inline void emit_load_ptr(unsigned int dst, unsigned int src,
				 int imm, struct jit_ctx *ctx)
{
	/* src contains the base addr of the 32/64-bit pointer */
	emit_long_instr(ctx, LW, dst, imm, src);
}

/* load a function pointer to register */
static inline void emit_load_func(unsigned int reg, ptr imm,
				  struct jit_ctx *ctx)
{
	if (config_enabled(CONFIG_64BIT)) {
		/* At this point imm is always 64-bit */
		emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
		emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
		emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx);
		emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
		emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx);
	} else {
		emit_load_imm(reg, imm, ctx);
	}
}
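
/*
 * Worked example for the 64-bit branch above: loading the helper address
 * 0xffffffff80123456 emits
 *
 *	lui	r_tmp_imm, 0xffff
 *	ori	r_tmp, r_tmp_imm, 0xffff	(upper 32 bits, emit_load_imm)
 *	dsll	r_tmp_imm, r_tmp, 16
 *	ori	r_tmp, r_tmp_imm, 0x8012
 *	dsll	r_tmp_imm, r_tmp, 16
 *	ori	reg, r_tmp_imm, 0x3456
 *
 * building the pointer 16 bits at a time; lui sign-extends on MIPS64,
 * which is exactly what a kernel segment address like this needs.
 */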

/* Move to real MIPS register */
static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
{
	emit_long_instr(ctx, ADDU, dst, src, r_zero);
}

/* Move to JIT (32-bit) register */
static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
{
	emit_addu(dst, src, r_zero, ctx);
}

/* Compute the immediate value for PC-relative branches. */
static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
	if (ctx->target == NULL)
		return 0;

	/*
	 * We want a pc-relative branch. We only do forward branches
	 * so tgt is always after pc. tgt is the instruction offset
	 * we want to jump to.
	 * Branch on MIPS:
	 * I: target_offset <- sign_extend(offset)
	 * I+1: PC += target_offset (delay slot)
	 *
	 * ctx->idx currently points to the branch instruction
	 * but the offset is added to the delay slot so we need
	 * to subtract 4.
	 */
	return ctx->offsets[tgt] -
		(ctx->idx * 4 - ctx->prologue_bytes) - 4;
}
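
/*
 * Example: if instruction i + 1 starts at body offset 64
 * (ctx->offsets[i + 1] == 64) and the branch itself sits at body offset
 * ctx->idx * 4 - ctx->prologue_bytes == 52, b_imm() returns
 * 64 - 52 - 4 = 8: eight bytes past the delay slot, which is precisely
 * the displacement the hardware adds when the branch is taken.
 */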

static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2,
			      unsigned int imm, struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];

		switch (cond) {
		case MIPS_COND_EQ:
			uasm_i_beq(&p, reg1, reg2, imm);
			break;
		case MIPS_COND_NE:
			uasm_i_bne(&p, reg1, reg2, imm);
			break;
		case MIPS_COND_ALL:
			uasm_i_b(&p, imm);
			break;
		default:
			pr_warn("%s: Unhandled branch conditional: %d\n",
				__func__, cond);
		}
	}
	ctx->idx++;
}

static inline void emit_b(unsigned int imm, struct jit_ctx *ctx)
{
	emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx);
}

static inline void emit_jalr(unsigned int link, unsigned int reg,
			     struct jit_ctx *ctx)
{
	emit_instr(ctx, jalr, link, reg);
}

static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
{
	emit_instr(ctx, jr, reg);
}

static inline u16 align_sp(unsigned int num)
{
	/* Double word alignment for 32-bit, quadword for 64-bit */
	unsigned int align = config_enabled(CONFIG_64BIT) ? 16 : 8;
	num = (num + (align - 1)) & -align;
	return num;
}
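
/*
 * Example: align_sp(20) returns 24 on 32-bit kernels (8-byte alignment)
 * and 32 on 64-bit kernels (16-byte alignment).
 */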

static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
{
	int i = 0, real_off = 0;
	u32 sflags, tmp_flags;

	/* Adjust the stack pointer */
	if (offset)
		emit_stack_offset(-align_sp(offset), ctx);

	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
	/* sflags is essentially a bitmap */
	while (tmp_flags) {
		if ((sflags >> i) & 0x1) {
			emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
					     ctx);
			real_off += SZREG;
		}
		i++;
		tmp_flags >>= 1;
	}

	/* save return address */
	if (ctx->flags & SEEN_CALL) {
		emit_store_stack_reg(r_ra, r_sp, real_off, ctx);
		real_off += SZREG;
	}

	/* Set up r_M leaving the alignment gap if necessary */
	if (ctx->flags & SEEN_MEM) {
		if (real_off % (SZREG * 2))
			real_off += SZREG;
		emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off);
	}
}
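
/*
 * Example frame on 32-bit (SZREG == 4) for flags SEEN_A | SEEN_X |
 * SEEN_MEM | SEEN_CALL: the loop above stores s3 (A) at r_sp + 0,
 * s4 (X) at +4 and s6 (MEM) at +8, ra lands at +12, and r_M then
 * points at r_sp + 16 (already 2 * SZREG aligned, so no gap is added).
 */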

static void restore_bpf_jit_regs(struct jit_ctx *ctx,
				 unsigned int offset)
{
	int i, real_off = 0;
	u32 sflags, tmp_flags;

	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
	/* sflags is a bitmap */
	i = 0;
	while (tmp_flags) {
		if ((sflags >> i) & 0x1) {
			emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
					    ctx);
			real_off += SZREG;
		}
		i++;
		tmp_flags >>= 1;
	}

	/* restore return address */
	if (ctx->flags & SEEN_CALL)
		emit_load_stack_reg(r_ra, r_sp, real_off, ctx);

	/* Restore the sp and discard the scratch memory */
	if (offset)
		emit_stack_offset(align_sp(offset), ctx);
}

static unsigned int get_stack_depth(struct jit_ctx *ctx)
{
	int sp_off = 0;

	/* How many s* regs do we need to preserve? */
	sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG;

	if (ctx->flags & SEEN_MEM)
		sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */

	if (ctx->flags & SEEN_CALL)
		sp_off += SZREG; /* Space for our ra register */

	return sp_off;
}
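
/*
 * Continuing the frame example above: three saved s-registers (12
 * bytes), ra (4 bytes) and the 16 scratch words (64 bytes) give
 * sp_off == 80, which align_sp() leaves unchanged on 32-bit.
 */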

static void build_prologue(struct jit_ctx *ctx)
{
	int sp_off;

	/* Calculate the total offset for the stack pointer */
	sp_off = get_stack_depth(ctx);
	save_bpf_jit_regs(ctx, sp_off);

	if (ctx->flags & SEEN_SKB)
		emit_reg_move(r_skb, MIPS_R_A0, ctx);

	if (ctx->flags & SEEN_SKB_DATA) {
		/* Load packet length */
		emit_load(r_skb_len, r_skb, offsetof(struct sk_buff, len),
			  ctx);
		emit_load(r_tmp, r_skb, offsetof(struct sk_buff, data_len),
			  ctx);
		/* Load the data pointer */
		emit_load_ptr(r_skb_data, r_skb,
			      offsetof(struct sk_buff, data), ctx);
		/* Load the header length */
		emit_subu(r_skb_hl, r_skb_len, r_tmp, ctx);
	}

	if (ctx->flags & SEEN_X)
		emit_jit_reg_move(r_X, r_zero, ctx);

	/*
	 * Do not leak kernel data to userspace, we only need to clear
	 * r_A if it is ever used. In fact if it is never used, we
	 * will not save/restore it, so clearing it in this case would
	 * corrupt the state of the caller.
	 */
	if (bpf_needs_clear_a(&ctx->skf->insns[0]) &&
	    (ctx->flags & SEEN_A))
		emit_jit_reg_move(r_A, r_zero, ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
	unsigned int sp_off;

	/* Calculate the total offset for the stack pointer */
	sp_off = get_stack_depth(ctx);
	restore_bpf_jit_regs(ctx, sp_off);

	/* Return */
	emit_jr(r_ra, ctx);
	emit_nop(ctx);
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
	 func##_positive)
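
/*
 * Example: for BPF_LD | BPF_W | BPF_ABS with k = 14, CHOOSE_LOAD_FUNC
 * selects sk_load_word_positive.  A negative k at or above SKF_LL_OFF
 * selects sk_load_word_negative, which understands the special
 * SKF_NET_OFF/SKF_LL_OFF offset bases; anything below SKF_LL_OFF falls
 * back to the generic sk_load_word.
 */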

static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned int i, off, condt;
	u32 k, b_off __maybe_unused;
	u8 (*sk_load_func)(unsigned long *skb, int offset);

	for (i = 0; i < prog->len; i++) {
		u16 code;

		inst = &(prog->insns[i]);
		pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
			 __func__, inst->code, inst->jt, inst->jf, inst->k);
		k = inst->k;
		code = bpf_anc_helper(inst);

		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx * 4;

		switch (code) {
		case BPF_LD | BPF_IMM:
			/* A <- k ==> li r_A, k */
			ctx->flags |= SEEN_A;
			emit_load_imm(r_A, k, ctx);
			break;
		case BPF_LD | BPF_W | BPF_LEN:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			/* A <- len ==> lw r_A, offset(skb) */
			ctx->flags |= SEEN_SKB | SEEN_A;
			off = offsetof(struct sk_buff, len);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_LD | BPF_MEM:
			/* A <- M[k] ==> lw r_A, offset(M) */
			ctx->flags |= SEEN_MEM | SEEN_A;
			emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_LD | BPF_W | BPF_ABS:
			/* A <- P[k:4] */
			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_word);
			goto load;
		case BPF_LD | BPF_H | BPF_ABS:
			/* A <- P[k:2] */
			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_half);
			goto load;
		case BPF_LD | BPF_B | BPF_ABS:
			/* A <- P[k:1] */
			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_byte);
load:
			emit_load_imm(r_off, k, ctx);
load_common:
			ctx->flags |= SEEN_CALL | SEEN_OFF |
				SEEN_SKB | SEEN_A | SEEN_SKB_DATA;

			emit_load_func(r_s0, (ptr)sk_load_func, ctx);
			emit_reg_move(MIPS_R_A0, r_skb, ctx);
			emit_jalr(MIPS_R_RA, r_s0, ctx);
			/* Load second argument to delay slot */
			emit_reg_move(MIPS_R_A1, r_off, ctx);
			/* Check the error value */
			emit_bcond(MIPS_COND_EQ, r_ret, 0, b_imm(i + 1, ctx),
				   ctx);
			/* Load return register on DS for failures */
			emit_reg_move(r_ret, r_zero, ctx);
			/* Return with error */
			emit_b(b_imm(prog->len, ctx), ctx);
			emit_nop(ctx);
			break;
		case BPF_LD | BPF_W | BPF_IND:
			/* A <- P[X + k:4] */
			sk_load_func = sk_load_word;
			goto load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			/* A <- P[X + k:2] */
			sk_load_func = sk_load_half;
			goto load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			/* A <- P[X + k:1] */
			sk_load_func = sk_load_byte;
load_ind:
			ctx->flags |= SEEN_OFF | SEEN_X;
			emit_addiu(r_off, r_X, k, ctx);
			goto load_common;
		case BPF_LDX | BPF_IMM:
			/* X <- k */
			ctx->flags |= SEEN_X;
			emit_load_imm(r_X, k, ctx);
			break;
		case BPF_LDX | BPF_MEM:
			/* X <- M[k] */
			ctx->flags |= SEEN_X | SEEN_MEM;
			emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_LDX | BPF_W | BPF_LEN:
			/* X <- len */
			ctx->flags |= SEEN_X | SEEN_SKB;
			off = offsetof(struct sk_buff, len);
			emit_load(r_X, r_skb, off, ctx);
			break;
		case BPF_LDX | BPF_B | BPF_MSH:
			/* X <- 4 * (P[k:1] & 0xf) */
			ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB;
			/* Load offset to a1 */
			emit_load_func(r_s0, (ptr)sk_load_byte, ctx);
			/*
			 * This may emit two instructions so it may not fit
			 * in the delay slot. So use a0 in the delay slot.
			 */
			emit_load_imm(MIPS_R_A1, k, ctx);
			emit_jalr(MIPS_R_RA, r_s0, ctx);
			emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
			/* Check the error value */
			emit_bcond(MIPS_COND_NE, r_ret, 0,
				   b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_zero, ctx);
			/* We are good */
			/* X <- P[k:1] & 0xf */
			emit_andi(r_X, r_A, 0xf, ctx);
			/* X <<= 2 */
			emit_b(b_imm(i + 1, ctx), ctx);
			emit_sll(r_X, r_X, 2, ctx); /* delay slot */
			break;
		case BPF_ST:
			/* M[k] <- A */
			ctx->flags |= SEEN_MEM | SEEN_A;
			emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_STX:
			/* M[k] <- X */
			ctx->flags |= SEEN_MEM | SEEN_X;
			emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
			/* A += K */
			ctx->flags |= SEEN_A;
			emit_addiu(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_X:
			/* A += X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_addu(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_K:
			/* A -= K */
			ctx->flags |= SEEN_A;
			emit_addiu(r_A, r_A, -k, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_X:
			/* A -= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_subu(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_K:
			/* A *= K */
			/* Load K to scratch register before MUL */
			ctx->flags |= SEEN_A;
			emit_load_imm(r_s0, k, ctx);
			emit_mul(r_A, r_A, r_s0, ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_X:
			/* A *= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_mul(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_DIV | BPF_K:
			/* A /= k */
			if (k == 1)
				break;
			if (optimize_div(&k)) {
				ctx->flags |= SEEN_A;
				emit_srl(r_A, r_A, k, ctx);
				break;
			}
			ctx->flags |= SEEN_A;
			emit_load_imm(r_s0, k, ctx);
			emit_div(r_A, r_s0, ctx);
			break;
		case BPF_ALU | BPF_MOD | BPF_K:
			/* A %= k */
			if (k == 1) {
				ctx->flags |= SEEN_A;
				emit_jit_reg_move(r_A, r_zero, ctx);
			} else {
				ctx->flags |= SEEN_A;
				emit_load_imm(r_s0, k, ctx);
				emit_mod(r_A, r_s0, ctx);
			}
			break;
		case BPF_ALU | BPF_DIV | BPF_X:
			/* A /= X */
			ctx->flags |= SEEN_X | SEEN_A;
			/* Check if r_X is zero */
			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_load_imm(r_ret, 0, ctx); /* delay slot */
			emit_div(r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_MOD | BPF_X:
			/* A %= X */
			ctx->flags |= SEEN_X | SEEN_A;
			/* Check if r_X is zero */
			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_load_imm(r_ret, 0, ctx); /* delay slot */
			emit_mod(r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			/* A |= K */
			ctx->flags |= SEEN_A;
			emit_ori(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			/* A |= X */
			ctx->flags |= SEEN_A;
			emit_ori(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_XOR | BPF_K:
			/* A ^= k */
			ctx->flags |= SEEN_A;
			emit_xori(r_A, r_A, k, ctx);
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X:
			/* A ^= X */
			ctx->flags |= SEEN_A;
			emit_xor(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			/* A &= K */
			ctx->flags |= SEEN_A;
			emit_andi(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			/* A &= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_and(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			/* A <<= K */
			ctx->flags |= SEEN_A;
			emit_sll(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_X:
			/* A <<= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_sllv(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_K:
			/* A >>= K */
			ctx->flags |= SEEN_A;
			emit_srl(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_X:
			/* A >>= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_srlv(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_NEG:
			/* A = -A */
			ctx->flags |= SEEN_A;
			emit_neg(r_A, ctx);
			break;
		case BPF_JMP | BPF_JA:
			/* pc += K */
			emit_b(b_imm(i + k + 1, ctx), ctx);
			emit_nop(ctx);
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
			/* pc += ( A == K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_EQ | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JEQ | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A == X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_EQ | MIPS_COND_X;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGE | BPF_K:
			/* pc += ( A >= K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GE | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGE | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A >= X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GE | MIPS_COND_X;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGT | BPF_K:
			/* pc += ( A > K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GT | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGT | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A > X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GT | MIPS_COND_X;
jmp_cmp:
			/* Greater or Equal */
			if ((condt & MIPS_COND_GE) ||
			    (condt & MIPS_COND_GT)) {
				if (condt & MIPS_COND_K) { /* K */
					ctx->flags |= SEEN_A;
					emit_sltiu(r_s0, r_A, k, ctx);
				} else { /* X */
					ctx->flags |= SEEN_A |
						SEEN_X;
					emit_sltu(r_s0, r_A, r_X, ctx);
				}
				/* A < (K|X) ? r_s0 = 1 */
				b_off = b_imm(i + inst->jf + 1, ctx);
				emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
					   ctx);
				emit_nop(ctx);
				/* A > (K|X) ? r_s0 = 0 */
				if (condt & MIPS_COND_GT) {
					/* Checking for equality */
					ctx->flags |= SEEN_A | SEEN_X;
					if (condt & MIPS_COND_K)
						emit_load_imm(r_s0, k, ctx);
					else
						emit_jit_reg_move(r_s0, r_X,
								  ctx);
					b_off = b_imm(i + inst->jf + 1, ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
					/* Finally, A > K|X */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_b(b_off, ctx);
					emit_nop(ctx);
				} else {
					/* A >= (K|X) so jump */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_b(b_off, ctx);
					emit_nop(ctx);
				}
			} else {
				/* A == K|X */
				if (condt & MIPS_COND_K) { /* K */
					ctx->flags |= SEEN_A;
					emit_load_imm(r_s0, k, ctx);
					/* jump true */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
					/* jump false */
					b_off = b_imm(i + inst->jf + 1,
						      ctx);
					emit_bcond(MIPS_COND_NE, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
				} else { /* X */
					/* jump true */
					ctx->flags |= SEEN_A | SEEN_X;
					b_off = b_imm(i + inst->jt + 1,
						      ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_X,
						   b_off, ctx);
					emit_nop(ctx);
					/* jump false */
					b_off = b_imm(i + inst->jf + 1, ctx);
					emit_bcond(MIPS_COND_NE, r_A, r_X,
						   b_off, ctx);
					emit_nop(ctx);
				}
			}
			break;
		case BPF_JMP | BPF_JSET | BPF_K:
			ctx->flags |= SEEN_A;
			/* pc += (A & K) ? pc -> jt : pc -> jf */
			emit_load_imm(r_s1, k, ctx);
			emit_and(r_s0, r_A, r_s1, ctx);
			/* jump true */
			b_off = b_imm(i + inst->jt + 1, ctx);
			emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
			emit_nop(ctx);
			/* jump false */
			b_off = b_imm(i + inst->jf + 1, ctx);
			emit_b(b_off, ctx);
			emit_nop(ctx);
			break;
		case BPF_JMP | BPF_JSET | BPF_X:
			ctx->flags |= SEEN_X | SEEN_A;
			/* pc += (A & X) ? pc -> jt : pc -> jf */
			emit_and(r_s0, r_A, r_X, ctx);
			/* jump true */
			b_off = b_imm(i + inst->jt + 1, ctx);
			emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
			emit_nop(ctx);
			/* jump false */
			b_off = b_imm(i + inst->jf + 1, ctx);
			emit_b(b_off, ctx);
			emit_nop(ctx);
			break;
		case BPF_RET | BPF_A:
			ctx->flags |= SEEN_A;
			if (i != prog->len - 1)
				/*
				 * If this is not the last instruction
				 * then jump to the epilogue
				 */
				emit_b(b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_A, ctx); /* delay slot */
			break;
		case BPF_RET | BPF_K:
			/*
			 * It can emit two instructions so it does not fit on
			 * the delay slot.
			 */
			emit_load_imm(r_ret, k, ctx);
			if (i != prog->len - 1) {
				/*
				 * If this is not the last instruction
				 * then jump to the epilogue
				 */
				emit_b(b_imm(prog->len, ctx), ctx);
				emit_nop(ctx);
			}
			break;
		case BPF_MISC | BPF_TAX:
			/* X = A */
			ctx->flags |= SEEN_X | SEEN_A;
			emit_jit_reg_move(r_X, r_A, ctx);
			break;
		case BPF_MISC | BPF_TXA:
			/* A = X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_jit_reg_move(r_A, r_X, ctx);
			break;
		/* AUX */
		case BPF_ANC | SKF_AD_PROTOCOL:
			/* A = ntohs(skb->protocol) */
			ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			off = offsetof(struct sk_buff, protocol);
			emit_half_load(r_A, r_skb, off, ctx);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
			/* This needs little endian fixup */
			if (cpu_has_wsbh) {
				/* R2 and later have the wsbh instruction */
				emit_wsbh(r_A, r_A, ctx);
			} else {
				/* Get first byte */
				emit_andi(r_tmp_imm, r_A, 0xff, ctx);
				/* Shift it */
				emit_sll(r_tmp, r_tmp_imm, 8, ctx);
				/* Get second byte */
				emit_srl(r_tmp_imm, r_A, 8, ctx);
				emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx);
				/* Put everything together in r_A */
				emit_or(r_A, r_tmp, r_tmp_imm, ctx);
			}
#endif
			break;
		case BPF_ANC | SKF_AD_CPU:
			ctx->flags |= SEEN_A | SEEN_OFF;
			/* A = current_thread_info()->cpu */
			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info,
						  cpu) != 4);
			off = offsetof(struct thread_info, cpu);
			/* $28/gp points to the thread_info struct */
			emit_load(r_A, 28, off, ctx);
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
			/* A = skb->dev->ifindex */
			ctx->flags |= SEEN_SKB | SEEN_A;
			off = offsetof(struct sk_buff, dev);
			/* Load *dev pointer */
			emit_load_ptr(r_s0, r_skb, off, ctx);
			/* error (0) in the delay slot */
			emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_zero, ctx);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			off = offsetof(struct net_device, ifindex);
			emit_load(r_A, r_s0, off, ctx);
			break;
		case BPF_ANC | SKF_AD_MARK:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			off = offsetof(struct sk_buff, mark);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			off = offsetof(struct sk_buff, hash);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  vlan_tci) != 2);
			off = offsetof(struct sk_buff, vlan_tci);
			emit_half_load(r_s0, r_skb, off, ctx);
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
				emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
			} else {
				emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
				/* return 1 if present */
				emit_sltu(r_A, r_zero, r_A, ctx);
			}
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
			ctx->flags |= SEEN_SKB;

			emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx);
			/* Keep only the last 3 bits */
			emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
#ifdef __BIG_ENDIAN_BITFIELD
			/* Get the actual packet type to the lower 3 bits */
			emit_srl(r_A, r_A, 5, ctx);
#endif
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			BUILD_BUG_ON(offsetof(struct sk_buff,
					      queue_mapping) > 0xff);
			off = offsetof(struct sk_buff, queue_mapping);
			emit_half_load(r_A, r_skb, off, ctx);
			break;
		default:
			pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
				 inst->code);
			return -1;
		}
	}

	/* compute offsets only during the first pass */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	return 0;
}
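
/*
 * build_body() is deliberately run twice: bpf_jit_compile() below first
 * calls it with ctx->target == NULL, so only ctx->idx advances and
 * ctx->offsets[] is populated, and then again with memory allocated,
 * by which point every b_imm() lookup already knows where its target
 * instruction landed.
 */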

int bpf_jit_enable __read_mostly;

void bpf_jit_compile(struct bpf_prog *fp)
{
	struct jit_ctx ctx;
	unsigned int alloc_size, tmp_idx;

	if (!bpf_jit_enable)
		return;

	memset(&ctx, 0, sizeof(ctx));

	/* One extra slot for the epilogue target (see build_body) */
	ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
	if (ctx.offsets == NULL)
		return;

	ctx.skf = fp;

	if (build_body(&ctx))
		goto out;

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
	/* just to complete the ctx.idx count */
	build_epilogue(&ctx);

	alloc_size = 4 * ctx.idx;
	ctx.target = module_alloc(alloc_size);
	if (ctx.target == NULL)
		goto out;

	/* Clean it */
	memset(ctx.target, 0, alloc_size);

	ctx.idx = 0;

	/* Generate the actual JIT code */
	build_prologue(&ctx);
	build_body(&ctx);
	build_epilogue(&ctx);

	/* Update the icache */
	flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx));

	if (bpf_jit_enable > 1)
		/* Dump JIT code */
		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

	fp->bpf_func = (void *)ctx.target;
	fp->jited = 1;

out:
	kfree(ctx.offsets);
}

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}