/* test_verifier.c — scraped copy; original display line-number residue removed */
/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
  10. #include <stdio.h>
  11. #include <unistd.h>
  12. #include <linux/bpf.h>
  13. #include <errno.h>
  14. #include <linux/unistd.h>
  15. #include <string.h>
  16. #include <linux/filter.h>
  17. #include <stddef.h>
  18. #include <stdbool.h>
  19. #include <sys/resource.h>
  20. #include "libbpf.h"
/* Upper bound on the number of BPF instructions a single test program may
 * hold; sizes the fixed insns[] array in struct bpf_test. */
#define MAX_INSNS 512
/* Element count of a true array (not valid on pointers/decayed parameters). */
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
/* Upper bound on per-test fixup slots: instruction indices whose imm field
 * is patched with a real map fd (or prog-array fd) before loading. */
#define MAX_FIXUPS 8
/* One verifier test case: a raw eBPF program plus the expected verdict
 * (and expected verifier error substring) when it is loaded. */
struct bpf_test {
	const char *descr;			/* human-readable test name */
	struct bpf_insn insns[MAX_INSNS];	/* the program under test */
	int fixup[MAX_FIXUPS];			/* insn indices to patch with a map fd;
						 * 0-terminated list — note index 0 itself
						 * therefore cannot be fixed up */
	int prog_array_fixup[MAX_FIXUPS];	/* insn indices to patch with a
						 * prog-array map fd (tail calls) */
	const char *errstr;			/* substring expected in verifier log
						 * when run privileged (NULL/"" if none) */
	const char *errstr_unpriv;		/* expected log substring when run
						 * unprivileged; falls back to errstr
						 * semantics when unset — TODO confirm
						 * against the runner (not visible here) */
	enum {
		UNDEF,				/* zero value: field not specified */
		ACCEPT,				/* verifier must accept the program */
		REJECT				/* verifier must reject the program */
	} result, result_unpriv;		/* expected verdicts: privileged and
						 * unprivileged (UNDEF = same handling
						 * as privileged, per runner) */
	enum bpf_prog_type prog_type;		/* program type to load as; zero value
						 * means the runner's default type */
};
  38. static struct bpf_test tests[] = {
  39. {
  40. "add+sub+mul",
  41. .insns = {
  42. BPF_MOV64_IMM(BPF_REG_1, 1),
  43. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
  44. BPF_MOV64_IMM(BPF_REG_2, 3),
  45. BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
  46. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
  47. BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
  48. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  49. BPF_EXIT_INSN(),
  50. },
  51. .result = ACCEPT,
  52. },
  53. {
  54. "unreachable",
  55. .insns = {
  56. BPF_EXIT_INSN(),
  57. BPF_EXIT_INSN(),
  58. },
  59. .errstr = "unreachable",
  60. .result = REJECT,
  61. },
  62. {
  63. "unreachable2",
  64. .insns = {
  65. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  66. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  67. BPF_EXIT_INSN(),
  68. },
  69. .errstr = "unreachable",
  70. .result = REJECT,
  71. },
  72. {
  73. "out of range jump",
  74. .insns = {
  75. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  76. BPF_EXIT_INSN(),
  77. },
  78. .errstr = "jump out of range",
  79. .result = REJECT,
  80. },
  81. {
  82. "out of range jump2",
  83. .insns = {
  84. BPF_JMP_IMM(BPF_JA, 0, 0, -2),
  85. BPF_EXIT_INSN(),
  86. },
  87. .errstr = "jump out of range",
  88. .result = REJECT,
  89. },
  90. {
  91. "test1 ld_imm64",
  92. .insns = {
  93. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  94. BPF_LD_IMM64(BPF_REG_0, 0),
  95. BPF_LD_IMM64(BPF_REG_0, 0),
  96. BPF_LD_IMM64(BPF_REG_0, 1),
  97. BPF_LD_IMM64(BPF_REG_0, 1),
  98. BPF_MOV64_IMM(BPF_REG_0, 2),
  99. BPF_EXIT_INSN(),
  100. },
  101. .errstr = "invalid BPF_LD_IMM insn",
  102. .errstr_unpriv = "R1 pointer comparison",
  103. .result = REJECT,
  104. },
  105. {
  106. "test2 ld_imm64",
  107. .insns = {
  108. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  109. BPF_LD_IMM64(BPF_REG_0, 0),
  110. BPF_LD_IMM64(BPF_REG_0, 0),
  111. BPF_LD_IMM64(BPF_REG_0, 1),
  112. BPF_LD_IMM64(BPF_REG_0, 1),
  113. BPF_EXIT_INSN(),
  114. },
  115. .errstr = "invalid BPF_LD_IMM insn",
  116. .errstr_unpriv = "R1 pointer comparison",
  117. .result = REJECT,
  118. },
  119. {
  120. "test3 ld_imm64",
  121. .insns = {
  122. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  123. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  124. BPF_LD_IMM64(BPF_REG_0, 0),
  125. BPF_LD_IMM64(BPF_REG_0, 0),
  126. BPF_LD_IMM64(BPF_REG_0, 1),
  127. BPF_LD_IMM64(BPF_REG_0, 1),
  128. BPF_EXIT_INSN(),
  129. },
  130. .errstr = "invalid bpf_ld_imm64 insn",
  131. .result = REJECT,
  132. },
  133. {
  134. "test4 ld_imm64",
  135. .insns = {
  136. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  137. BPF_EXIT_INSN(),
  138. },
  139. .errstr = "invalid bpf_ld_imm64 insn",
  140. .result = REJECT,
  141. },
  142. {
  143. "test5 ld_imm64",
  144. .insns = {
  145. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  146. },
  147. .errstr = "invalid bpf_ld_imm64 insn",
  148. .result = REJECT,
  149. },
  150. {
  151. "no bpf_exit",
  152. .insns = {
  153. BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
  154. },
  155. .errstr = "jump out of range",
  156. .result = REJECT,
  157. },
  158. {
  159. "loop (back-edge)",
  160. .insns = {
  161. BPF_JMP_IMM(BPF_JA, 0, 0, -1),
  162. BPF_EXIT_INSN(),
  163. },
  164. .errstr = "back-edge",
  165. .result = REJECT,
  166. },
  167. {
  168. "loop2 (back-edge)",
  169. .insns = {
  170. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  171. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  172. BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
  173. BPF_JMP_IMM(BPF_JA, 0, 0, -4),
  174. BPF_EXIT_INSN(),
  175. },
  176. .errstr = "back-edge",
  177. .result = REJECT,
  178. },
  179. {
  180. "conditional loop",
  181. .insns = {
  182. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  183. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  184. BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
  185. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
  186. BPF_EXIT_INSN(),
  187. },
  188. .errstr = "back-edge",
  189. .result = REJECT,
  190. },
  191. {
  192. "read uninitialized register",
  193. .insns = {
  194. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  195. BPF_EXIT_INSN(),
  196. },
  197. .errstr = "R2 !read_ok",
  198. .result = REJECT,
  199. },
  200. {
  201. "read invalid register",
  202. .insns = {
  203. BPF_MOV64_REG(BPF_REG_0, -1),
  204. BPF_EXIT_INSN(),
  205. },
  206. .errstr = "R15 is invalid",
  207. .result = REJECT,
  208. },
  209. {
  210. "program doesn't init R0 before exit",
  211. .insns = {
  212. BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
  213. BPF_EXIT_INSN(),
  214. },
  215. .errstr = "R0 !read_ok",
  216. .result = REJECT,
  217. },
  218. {
  219. "program doesn't init R0 before exit in all branches",
  220. .insns = {
  221. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  222. BPF_MOV64_IMM(BPF_REG_0, 1),
  223. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
  224. BPF_EXIT_INSN(),
  225. },
  226. .errstr = "R0 !read_ok",
  227. .errstr_unpriv = "R1 pointer comparison",
  228. .result = REJECT,
  229. },
  230. {
  231. "stack out of bounds",
  232. .insns = {
  233. BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
  234. BPF_EXIT_INSN(),
  235. },
  236. .errstr = "invalid stack",
  237. .result = REJECT,
  238. },
  239. {
  240. "invalid call insn1",
  241. .insns = {
  242. BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
  243. BPF_EXIT_INSN(),
  244. },
  245. .errstr = "BPF_CALL uses reserved",
  246. .result = REJECT,
  247. },
  248. {
  249. "invalid call insn2",
  250. .insns = {
  251. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
  252. BPF_EXIT_INSN(),
  253. },
  254. .errstr = "BPF_CALL uses reserved",
  255. .result = REJECT,
  256. },
  257. {
  258. "invalid function call",
  259. .insns = {
  260. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
  261. BPF_EXIT_INSN(),
  262. },
  263. .errstr = "invalid func 1234567",
  264. .result = REJECT,
  265. },
  266. {
  267. "uninitialized stack1",
  268. .insns = {
  269. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  270. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  271. BPF_LD_MAP_FD(BPF_REG_1, 0),
  272. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  273. BPF_EXIT_INSN(),
  274. },
  275. .fixup = {2},
  276. .errstr = "invalid indirect read from stack",
  277. .result = REJECT,
  278. },
  279. {
  280. "uninitialized stack2",
  281. .insns = {
  282. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  283. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
  284. BPF_EXIT_INSN(),
  285. },
  286. .errstr = "invalid read from stack",
  287. .result = REJECT,
  288. },
  289. {
  290. "check valid spill/fill",
  291. .insns = {
  292. /* spill R1(ctx) into stack */
  293. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  294. /* fill it back into R2 */
  295. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
  296. /* should be able to access R0 = *(R2 + 8) */
  297. /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
  298. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  299. BPF_EXIT_INSN(),
  300. },
  301. .errstr_unpriv = "R0 leaks addr",
  302. .result = ACCEPT,
  303. .result_unpriv = REJECT,
  304. },
  305. {
  306. "check corrupted spill/fill",
  307. .insns = {
  308. /* spill R1(ctx) into stack */
  309. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  310. /* mess up with R1 pointer on stack */
  311. BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
  312. /* fill back into R0 should fail */
  313. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
  314. BPF_EXIT_INSN(),
  315. },
  316. .errstr_unpriv = "attempt to corrupt spilled",
  317. .errstr = "corrupted spill",
  318. .result = REJECT,
  319. },
  320. {
  321. "invalid src register in STX",
  322. .insns = {
  323. BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
  324. BPF_EXIT_INSN(),
  325. },
  326. .errstr = "R15 is invalid",
  327. .result = REJECT,
  328. },
  329. {
  330. "invalid dst register in STX",
  331. .insns = {
  332. BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
  333. BPF_EXIT_INSN(),
  334. },
  335. .errstr = "R14 is invalid",
  336. .result = REJECT,
  337. },
  338. {
  339. "invalid dst register in ST",
  340. .insns = {
  341. BPF_ST_MEM(BPF_B, 14, -1, -1),
  342. BPF_EXIT_INSN(),
  343. },
  344. .errstr = "R14 is invalid",
  345. .result = REJECT,
  346. },
  347. {
  348. "invalid src register in LDX",
  349. .insns = {
  350. BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
  351. BPF_EXIT_INSN(),
  352. },
  353. .errstr = "R12 is invalid",
  354. .result = REJECT,
  355. },
  356. {
  357. "invalid dst register in LDX",
  358. .insns = {
  359. BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
  360. BPF_EXIT_INSN(),
  361. },
  362. .errstr = "R11 is invalid",
  363. .result = REJECT,
  364. },
  365. {
  366. "junk insn",
  367. .insns = {
  368. BPF_RAW_INSN(0, 0, 0, 0, 0),
  369. BPF_EXIT_INSN(),
  370. },
  371. .errstr = "invalid BPF_LD_IMM",
  372. .result = REJECT,
  373. },
  374. {
  375. "junk insn2",
  376. .insns = {
  377. BPF_RAW_INSN(1, 0, 0, 0, 0),
  378. BPF_EXIT_INSN(),
  379. },
  380. .errstr = "BPF_LDX uses reserved fields",
  381. .result = REJECT,
  382. },
  383. {
  384. "junk insn3",
  385. .insns = {
  386. BPF_RAW_INSN(-1, 0, 0, 0, 0),
  387. BPF_EXIT_INSN(),
  388. },
  389. .errstr = "invalid BPF_ALU opcode f0",
  390. .result = REJECT,
  391. },
  392. {
  393. "junk insn4",
  394. .insns = {
  395. BPF_RAW_INSN(-1, -1, -1, -1, -1),
  396. BPF_EXIT_INSN(),
  397. },
  398. .errstr = "invalid BPF_ALU opcode f0",
  399. .result = REJECT,
  400. },
  401. {
  402. "junk insn5",
  403. .insns = {
  404. BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
  405. BPF_EXIT_INSN(),
  406. },
  407. .errstr = "BPF_ALU uses reserved fields",
  408. .result = REJECT,
  409. },
  410. {
  411. "misaligned read from stack",
  412. .insns = {
  413. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  414. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
  415. BPF_EXIT_INSN(),
  416. },
  417. .errstr = "misaligned access",
  418. .result = REJECT,
  419. },
  420. {
  421. "invalid map_fd for function call",
  422. .insns = {
  423. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  424. BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
  425. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  426. BPF_LD_MAP_FD(BPF_REG_1, 0),
  427. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
  428. BPF_EXIT_INSN(),
  429. },
  430. .errstr = "fd 0 is not pointing to valid bpf_map",
  431. .result = REJECT,
  432. },
  433. {
  434. "don't check return value before access",
  435. .insns = {
  436. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  437. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  438. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  439. BPF_LD_MAP_FD(BPF_REG_1, 0),
  440. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  441. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
  442. BPF_EXIT_INSN(),
  443. },
  444. .fixup = {3},
  445. .errstr = "R0 invalid mem access 'map_value_or_null'",
  446. .result = REJECT,
  447. },
  448. {
  449. "access memory with incorrect alignment",
  450. .insns = {
  451. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  452. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  453. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  454. BPF_LD_MAP_FD(BPF_REG_1, 0),
  455. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  456. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  457. BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
  458. BPF_EXIT_INSN(),
  459. },
  460. .fixup = {3},
  461. .errstr = "misaligned access",
  462. .result = REJECT,
  463. },
  464. {
  465. "sometimes access memory with incorrect alignment",
  466. .insns = {
  467. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  468. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  469. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  470. BPF_LD_MAP_FD(BPF_REG_1, 0),
  471. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  472. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  473. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
  474. BPF_EXIT_INSN(),
  475. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
  476. BPF_EXIT_INSN(),
  477. },
  478. .fixup = {3},
  479. .errstr = "R0 invalid mem access",
  480. .errstr_unpriv = "R0 leaks addr",
  481. .result = REJECT,
  482. },
  483. {
  484. "jump test 1",
  485. .insns = {
  486. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  487. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
  488. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  489. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
  490. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
  491. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
  492. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
  493. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
  494. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
  495. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
  496. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
  497. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
  498. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
  499. BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
  500. BPF_MOV64_IMM(BPF_REG_0, 0),
  501. BPF_EXIT_INSN(),
  502. },
  503. .errstr_unpriv = "R1 pointer comparison",
  504. .result_unpriv = REJECT,
  505. .result = ACCEPT,
  506. },
  507. {
  508. "jump test 2",
  509. .insns = {
  510. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  511. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
  512. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
  513. BPF_JMP_IMM(BPF_JA, 0, 0, 14),
  514. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
  515. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
  516. BPF_JMP_IMM(BPF_JA, 0, 0, 11),
  517. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
  518. BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
  519. BPF_JMP_IMM(BPF_JA, 0, 0, 8),
  520. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
  521. BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
  522. BPF_JMP_IMM(BPF_JA, 0, 0, 5),
  523. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
  524. BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
  525. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  526. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
  527. BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
  528. BPF_MOV64_IMM(BPF_REG_0, 0),
  529. BPF_EXIT_INSN(),
  530. },
  531. .errstr_unpriv = "R1 pointer comparison",
  532. .result_unpriv = REJECT,
  533. .result = ACCEPT,
  534. },
  535. {
  536. "jump test 3",
  537. .insns = {
  538. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  539. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
  540. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
  541. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  542. BPF_JMP_IMM(BPF_JA, 0, 0, 19),
  543. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
  544. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
  545. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
  546. BPF_JMP_IMM(BPF_JA, 0, 0, 15),
  547. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
  548. BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
  549. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
  550. BPF_JMP_IMM(BPF_JA, 0, 0, 11),
  551. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
  552. BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
  553. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
  554. BPF_JMP_IMM(BPF_JA, 0, 0, 7),
  555. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
  556. BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
  557. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
  558. BPF_JMP_IMM(BPF_JA, 0, 0, 3),
  559. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
  560. BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
  561. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
  562. BPF_LD_MAP_FD(BPF_REG_1, 0),
  563. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
  564. BPF_EXIT_INSN(),
  565. },
  566. .fixup = {24},
  567. .errstr_unpriv = "R1 pointer comparison",
  568. .result_unpriv = REJECT,
  569. .result = ACCEPT,
  570. },
  571. {
  572. "jump test 4",
  573. .insns = {
  574. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  575. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  576. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  577. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  578. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  579. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  580. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  581. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  582. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  583. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  584. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  585. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  586. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  587. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  588. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  589. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  590. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  591. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  592. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  593. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  594. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  595. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  596. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  597. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  598. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  599. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  600. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  601. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  602. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  603. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  604. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  605. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  606. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  607. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  608. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  609. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  610. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  611. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  612. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  613. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  614. BPF_MOV64_IMM(BPF_REG_0, 0),
  615. BPF_EXIT_INSN(),
  616. },
  617. .errstr_unpriv = "R1 pointer comparison",
  618. .result_unpriv = REJECT,
  619. .result = ACCEPT,
  620. },
  621. {
  622. "jump test 5",
  623. .insns = {
  624. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  625. BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
  626. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  627. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  628. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  629. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  630. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  631. BPF_MOV64_IMM(BPF_REG_0, 0),
  632. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  633. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  634. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  635. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  636. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  637. BPF_MOV64_IMM(BPF_REG_0, 0),
  638. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  639. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  640. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  641. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  642. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  643. BPF_MOV64_IMM(BPF_REG_0, 0),
  644. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  645. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  646. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  647. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  648. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  649. BPF_MOV64_IMM(BPF_REG_0, 0),
  650. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  651. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  652. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  653. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  654. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  655. BPF_MOV64_IMM(BPF_REG_0, 0),
  656. BPF_EXIT_INSN(),
  657. },
  658. .errstr_unpriv = "R1 pointer comparison",
  659. .result_unpriv = REJECT,
  660. .result = ACCEPT,
  661. },
  662. {
  663. "access skb fields ok",
  664. .insns = {
  665. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  666. offsetof(struct __sk_buff, len)),
  667. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  668. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  669. offsetof(struct __sk_buff, mark)),
  670. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  671. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  672. offsetof(struct __sk_buff, pkt_type)),
  673. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  674. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  675. offsetof(struct __sk_buff, queue_mapping)),
  676. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  677. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  678. offsetof(struct __sk_buff, protocol)),
  679. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  680. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  681. offsetof(struct __sk_buff, vlan_present)),
  682. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  683. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  684. offsetof(struct __sk_buff, vlan_tci)),
  685. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  686. BPF_EXIT_INSN(),
  687. },
  688. .result = ACCEPT,
  689. },
  690. {
  691. "access skb fields bad1",
  692. .insns = {
  693. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
  694. BPF_EXIT_INSN(),
  695. },
  696. .errstr = "invalid bpf_context access",
  697. .result = REJECT,
  698. },
  699. {
  700. "access skb fields bad2",
  701. .insns = {
  702. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
  703. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  704. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  705. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  706. BPF_LD_MAP_FD(BPF_REG_1, 0),
  707. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  708. BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
  709. BPF_EXIT_INSN(),
  710. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  711. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  712. offsetof(struct __sk_buff, pkt_type)),
  713. BPF_EXIT_INSN(),
  714. },
  715. .fixup = {4},
  716. .errstr = "different pointers",
  717. .errstr_unpriv = "R1 pointer comparison",
  718. .result = REJECT,
  719. },
  720. {
  721. "access skb fields bad3",
  722. .insns = {
  723. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  724. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  725. offsetof(struct __sk_buff, pkt_type)),
  726. BPF_EXIT_INSN(),
  727. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  728. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  729. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  730. BPF_LD_MAP_FD(BPF_REG_1, 0),
  731. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  732. BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
  733. BPF_EXIT_INSN(),
  734. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  735. BPF_JMP_IMM(BPF_JA, 0, 0, -12),
  736. },
  737. .fixup = {6},
  738. .errstr = "different pointers",
  739. .errstr_unpriv = "R1 pointer comparison",
  740. .result = REJECT,
  741. },
  742. {
  743. "access skb fields bad4",
  744. .insns = {
  745. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
  746. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  747. offsetof(struct __sk_buff, len)),
  748. BPF_MOV64_IMM(BPF_REG_0, 0),
  749. BPF_EXIT_INSN(),
  750. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  751. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  752. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  753. BPF_LD_MAP_FD(BPF_REG_1, 0),
  754. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  755. BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
  756. BPF_EXIT_INSN(),
  757. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  758. BPF_JMP_IMM(BPF_JA, 0, 0, -13),
  759. },
  760. .fixup = {7},
  761. .errstr = "different pointers",
  762. .errstr_unpriv = "R1 pointer comparison",
  763. .result = REJECT,
  764. },
  765. {
  766. "check skb->mark is not writeable by sockets",
  767. .insns = {
  768. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  769. offsetof(struct __sk_buff, mark)),
  770. BPF_EXIT_INSN(),
  771. },
  772. .errstr = "invalid bpf_context access",
  773. .errstr_unpriv = "R1 leaks addr",
  774. .result = REJECT,
  775. },
  776. {
  777. "check skb->tc_index is not writeable by sockets",
  778. .insns = {
  779. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  780. offsetof(struct __sk_buff, tc_index)),
  781. BPF_EXIT_INSN(),
  782. },
  783. .errstr = "invalid bpf_context access",
  784. .errstr_unpriv = "R1 leaks addr",
  785. .result = REJECT,
  786. },
  787. {
  788. "check non-u32 access to cb",
  789. .insns = {
  790. BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
  791. offsetof(struct __sk_buff, cb[0])),
  792. BPF_EXIT_INSN(),
  793. },
  794. .errstr = "invalid bpf_context access",
  795. .errstr_unpriv = "R1 leaks addr",
  796. .result = REJECT,
  797. },
  798. {
  799. "check out of range skb->cb access",
  800. .insns = {
  801. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  802. offsetof(struct __sk_buff, cb[0]) + 256),
  803. BPF_EXIT_INSN(),
  804. },
  805. .errstr = "invalid bpf_context access",
  806. .errstr_unpriv = "",
  807. .result = REJECT,
  808. .prog_type = BPF_PROG_TYPE_SCHED_ACT,
  809. },
  810. {
  811. "write skb fields from socket prog",
  812. .insns = {
  813. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  814. offsetof(struct __sk_buff, cb[4])),
  815. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  816. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  817. offsetof(struct __sk_buff, mark)),
  818. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  819. offsetof(struct __sk_buff, tc_index)),
  820. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  821. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  822. offsetof(struct __sk_buff, cb[0])),
  823. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  824. offsetof(struct __sk_buff, cb[2])),
  825. BPF_EXIT_INSN(),
  826. },
  827. .result = ACCEPT,
  828. .errstr_unpriv = "R1 leaks addr",
  829. .result_unpriv = REJECT,
  830. },
  831. {
  832. "write skb fields from tc_cls_act prog",
  833. .insns = {
  834. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  835. offsetof(struct __sk_buff, cb[0])),
  836. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  837. offsetof(struct __sk_buff, mark)),
  838. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  839. offsetof(struct __sk_buff, tc_index)),
  840. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  841. offsetof(struct __sk_buff, tc_index)),
  842. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  843. offsetof(struct __sk_buff, cb[3])),
  844. BPF_EXIT_INSN(),
  845. },
  846. .errstr_unpriv = "",
  847. .result_unpriv = REJECT,
  848. .result = ACCEPT,
  849. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  850. },
  851. {
  852. "PTR_TO_STACK store/load",
  853. .insns = {
  854. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  855. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
  856. BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
  857. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
  858. BPF_EXIT_INSN(),
  859. },
  860. .result = ACCEPT,
  861. },
  862. {
  863. "PTR_TO_STACK store/load - bad alignment on off",
  864. .insns = {
  865. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  866. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  867. BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
  868. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
  869. BPF_EXIT_INSN(),
  870. },
  871. .result = REJECT,
  872. .errstr = "misaligned access off -6 size 8",
  873. },
  874. {
  875. "PTR_TO_STACK store/load - bad alignment on reg",
  876. .insns = {
  877. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  878. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
  879. BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
  880. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
  881. BPF_EXIT_INSN(),
  882. },
  883. .result = REJECT,
  884. .errstr = "misaligned access off -2 size 8",
  885. },
  886. {
  887. "PTR_TO_STACK store/load - out of bounds low",
  888. .insns = {
  889. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  890. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
  891. BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
  892. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
  893. BPF_EXIT_INSN(),
  894. },
  895. .result = REJECT,
  896. .errstr = "invalid stack off=-79992 size=8",
  897. },
  898. {
  899. "PTR_TO_STACK store/load - out of bounds high",
  900. .insns = {
  901. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  902. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  903. BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
  904. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
  905. BPF_EXIT_INSN(),
  906. },
  907. .result = REJECT,
  908. .errstr = "invalid stack off=0 size=8",
  909. },
  910. {
  911. "unpriv: return pointer",
  912. .insns = {
  913. BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
  914. BPF_EXIT_INSN(),
  915. },
  916. .result = ACCEPT,
  917. .result_unpriv = REJECT,
  918. .errstr_unpriv = "R0 leaks addr",
  919. },
  920. {
  921. "unpriv: add const to pointer",
  922. .insns = {
  923. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  924. BPF_MOV64_IMM(BPF_REG_0, 0),
  925. BPF_EXIT_INSN(),
  926. },
  927. .result = ACCEPT,
  928. .result_unpriv = REJECT,
  929. .errstr_unpriv = "R1 pointer arithmetic",
  930. },
  931. {
  932. "unpriv: add pointer to pointer",
  933. .insns = {
  934. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
  935. BPF_MOV64_IMM(BPF_REG_0, 0),
  936. BPF_EXIT_INSN(),
  937. },
  938. .result = ACCEPT,
  939. .result_unpriv = REJECT,
  940. .errstr_unpriv = "R1 pointer arithmetic",
  941. },
  942. {
  943. "unpriv: neg pointer",
  944. .insns = {
  945. BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
  946. BPF_MOV64_IMM(BPF_REG_0, 0),
  947. BPF_EXIT_INSN(),
  948. },
  949. .result = ACCEPT,
  950. .result_unpriv = REJECT,
  951. .errstr_unpriv = "R1 pointer arithmetic",
  952. },
  953. {
  954. "unpriv: cmp pointer with const",
  955. .insns = {
  956. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
  957. BPF_MOV64_IMM(BPF_REG_0, 0),
  958. BPF_EXIT_INSN(),
  959. },
  960. .result = ACCEPT,
  961. .result_unpriv = REJECT,
  962. .errstr_unpriv = "R1 pointer comparison",
  963. },
  964. {
  965. "unpriv: cmp pointer with pointer",
  966. .insns = {
  967. BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  968. BPF_MOV64_IMM(BPF_REG_0, 0),
  969. BPF_EXIT_INSN(),
  970. },
  971. .result = ACCEPT,
  972. .result_unpriv = REJECT,
  973. .errstr_unpriv = "R10 pointer comparison",
  974. },
  975. {
  976. "unpriv: check that printk is disallowed",
  977. .insns = {
  978. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  979. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  980. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  981. BPF_MOV64_IMM(BPF_REG_2, 8),
  982. BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
  983. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk),
  984. BPF_MOV64_IMM(BPF_REG_0, 0),
  985. BPF_EXIT_INSN(),
  986. },
  987. .errstr_unpriv = "unknown func 6",
  988. .result_unpriv = REJECT,
  989. .result = ACCEPT,
  990. },
  991. {
  992. "unpriv: pass pointer to helper function",
  993. .insns = {
  994. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  995. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  996. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  997. BPF_LD_MAP_FD(BPF_REG_1, 0),
  998. BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
  999. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  1000. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
  1001. BPF_MOV64_IMM(BPF_REG_0, 0),
  1002. BPF_EXIT_INSN(),
  1003. },
  1004. .fixup = {3},
  1005. .errstr_unpriv = "R4 leaks addr",
  1006. .result_unpriv = REJECT,
  1007. .result = ACCEPT,
  1008. },
  1009. {
  1010. "unpriv: indirectly pass pointer on stack to helper function",
  1011. .insns = {
  1012. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  1013. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1014. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1015. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1016. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  1017. BPF_MOV64_IMM(BPF_REG_0, 0),
  1018. BPF_EXIT_INSN(),
  1019. },
  1020. .fixup = {3},
  1021. .errstr = "invalid indirect read from stack off -8+0 size 8",
  1022. .result = REJECT,
  1023. },
  1024. {
  1025. "unpriv: mangle pointer on stack 1",
  1026. .insns = {
  1027. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  1028. BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
  1029. BPF_MOV64_IMM(BPF_REG_0, 0),
  1030. BPF_EXIT_INSN(),
  1031. },
  1032. .errstr_unpriv = "attempt to corrupt spilled",
  1033. .result_unpriv = REJECT,
  1034. .result = ACCEPT,
  1035. },
  1036. {
  1037. "unpriv: mangle pointer on stack 2",
  1038. .insns = {
  1039. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  1040. BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
  1041. BPF_MOV64_IMM(BPF_REG_0, 0),
  1042. BPF_EXIT_INSN(),
  1043. },
  1044. .errstr_unpriv = "attempt to corrupt spilled",
  1045. .result_unpriv = REJECT,
  1046. .result = ACCEPT,
  1047. },
  1048. {
  1049. "unpriv: read pointer from stack in small chunks",
  1050. .insns = {
  1051. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  1052. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
  1053. BPF_MOV64_IMM(BPF_REG_0, 0),
  1054. BPF_EXIT_INSN(),
  1055. },
  1056. .errstr = "invalid size",
  1057. .result = REJECT,
  1058. },
  1059. {
  1060. "unpriv: write pointer into ctx",
  1061. .insns = {
  1062. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
  1063. BPF_MOV64_IMM(BPF_REG_0, 0),
  1064. BPF_EXIT_INSN(),
  1065. },
  1066. .errstr_unpriv = "R1 leaks addr",
  1067. .result_unpriv = REJECT,
  1068. .errstr = "invalid bpf_context access",
  1069. .result = REJECT,
  1070. },
  1071. {
  1072. "unpriv: write pointer into map elem value",
  1073. .insns = {
  1074. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  1075. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1076. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1077. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1078. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  1079. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  1080. BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
  1081. BPF_EXIT_INSN(),
  1082. },
  1083. .fixup = {3},
  1084. .errstr_unpriv = "R0 leaks addr",
  1085. .result_unpriv = REJECT,
  1086. .result = ACCEPT,
  1087. },
  1088. {
  1089. "unpriv: partial copy of pointer",
  1090. .insns = {
  1091. BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
  1092. BPF_MOV64_IMM(BPF_REG_0, 0),
  1093. BPF_EXIT_INSN(),
  1094. },
  1095. .errstr_unpriv = "R10 partial copy",
  1096. .result_unpriv = REJECT,
  1097. .result = ACCEPT,
  1098. },
  1099. {
  1100. "unpriv: pass pointer to tail_call",
  1101. .insns = {
  1102. BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
  1103. BPF_LD_MAP_FD(BPF_REG_2, 0),
  1104. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
  1105. BPF_MOV64_IMM(BPF_REG_0, 0),
  1106. BPF_EXIT_INSN(),
  1107. },
  1108. .prog_array_fixup = {1},
  1109. .errstr_unpriv = "R3 leaks addr into helper",
  1110. .result_unpriv = REJECT,
  1111. .result = ACCEPT,
  1112. },
  1113. {
  1114. "unpriv: cmp map pointer with zero",
  1115. .insns = {
  1116. BPF_MOV64_IMM(BPF_REG_1, 0),
  1117. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1118. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
  1119. BPF_MOV64_IMM(BPF_REG_0, 0),
  1120. BPF_EXIT_INSN(),
  1121. },
  1122. .fixup = {1},
  1123. .errstr_unpriv = "R1 pointer comparison",
  1124. .result_unpriv = REJECT,
  1125. .result = ACCEPT,
  1126. },
  1127. {
  1128. "unpriv: write into frame pointer",
  1129. .insns = {
  1130. BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
  1131. BPF_MOV64_IMM(BPF_REG_0, 0),
  1132. BPF_EXIT_INSN(),
  1133. },
  1134. .errstr = "frame pointer is read only",
  1135. .result = REJECT,
  1136. },
  1137. {
  1138. "unpriv: cmp of frame pointer",
  1139. .insns = {
  1140. BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
  1141. BPF_MOV64_IMM(BPF_REG_0, 0),
  1142. BPF_EXIT_INSN(),
  1143. },
  1144. .errstr_unpriv = "R10 pointer comparison",
  1145. .result_unpriv = REJECT,
  1146. .result = ACCEPT,
  1147. },
  1148. {
  1149. "unpriv: cmp of stack pointer",
  1150. .insns = {
  1151. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1152. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1153. BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
  1154. BPF_MOV64_IMM(BPF_REG_0, 0),
  1155. BPF_EXIT_INSN(),
  1156. },
  1157. .errstr_unpriv = "R2 pointer comparison",
  1158. .result_unpriv = REJECT,
  1159. .result = ACCEPT,
  1160. },
  1161. {
  1162. "unpriv: obfuscate stack pointer",
  1163. .insns = {
  1164. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1165. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1166. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1167. BPF_MOV64_IMM(BPF_REG_0, 0),
  1168. BPF_EXIT_INSN(),
  1169. },
  1170. .errstr_unpriv = "R2 pointer arithmetic",
  1171. .result_unpriv = REJECT,
  1172. .result = ACCEPT,
  1173. },
  1174. };
  1175. static int probe_filter_length(struct bpf_insn *fp)
  1176. {
  1177. int len = 0;
  1178. for (len = MAX_INSNS - 1; len > 0; --len)
  1179. if (fp[len].code != 0 || fp[len].imm != 0)
  1180. break;
  1181. return len + 1;
  1182. }
  1183. static int create_map(void)
  1184. {
  1185. int map_fd;
  1186. map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
  1187. sizeof(long long), sizeof(long long), 1024);
  1188. if (map_fd < 0)
  1189. printf("failed to create map '%s'\n", strerror(errno));
  1190. return map_fd;
  1191. }
  1192. static int create_prog_array(void)
  1193. {
  1194. int map_fd;
  1195. map_fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY,
  1196. sizeof(int), sizeof(int), 4);
  1197. if (map_fd < 0)
  1198. printf("failed to create prog_array '%s'\n", strerror(errno));
  1199. return map_fd;
  1200. }
  1201. static int test(void)
  1202. {
  1203. int prog_fd, i, pass_cnt = 0, err_cnt = 0;
  1204. bool unpriv = geteuid() != 0;
  1205. for (i = 0; i < ARRAY_SIZE(tests); i++) {
  1206. struct bpf_insn *prog = tests[i].insns;
  1207. int prog_type = tests[i].prog_type;
  1208. int prog_len = probe_filter_length(prog);
  1209. int *fixup = tests[i].fixup;
  1210. int *prog_array_fixup = tests[i].prog_array_fixup;
  1211. int expected_result;
  1212. const char *expected_errstr;
  1213. int map_fd = -1, prog_array_fd = -1;
  1214. if (*fixup) {
  1215. map_fd = create_map();
  1216. do {
  1217. prog[*fixup].imm = map_fd;
  1218. fixup++;
  1219. } while (*fixup);
  1220. }
  1221. if (*prog_array_fixup) {
  1222. prog_array_fd = create_prog_array();
  1223. do {
  1224. prog[*prog_array_fixup].imm = prog_array_fd;
  1225. prog_array_fixup++;
  1226. } while (*prog_array_fixup);
  1227. }
  1228. printf("#%d %s ", i, tests[i].descr);
  1229. prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
  1230. prog, prog_len * sizeof(struct bpf_insn),
  1231. "GPL", 0);
  1232. if (unpriv && tests[i].result_unpriv != UNDEF)
  1233. expected_result = tests[i].result_unpriv;
  1234. else
  1235. expected_result = tests[i].result;
  1236. if (unpriv && tests[i].errstr_unpriv)
  1237. expected_errstr = tests[i].errstr_unpriv;
  1238. else
  1239. expected_errstr = tests[i].errstr;
  1240. if (expected_result == ACCEPT) {
  1241. if (prog_fd < 0) {
  1242. printf("FAIL\nfailed to load prog '%s'\n",
  1243. strerror(errno));
  1244. printf("%s", bpf_log_buf);
  1245. err_cnt++;
  1246. goto fail;
  1247. }
  1248. } else {
  1249. if (prog_fd >= 0) {
  1250. printf("FAIL\nunexpected success to load\n");
  1251. printf("%s", bpf_log_buf);
  1252. err_cnt++;
  1253. goto fail;
  1254. }
  1255. if (strstr(bpf_log_buf, expected_errstr) == 0) {
  1256. printf("FAIL\nunexpected error message: %s",
  1257. bpf_log_buf);
  1258. err_cnt++;
  1259. goto fail;
  1260. }
  1261. }
  1262. pass_cnt++;
  1263. printf("OK\n");
  1264. fail:
  1265. if (map_fd >= 0)
  1266. close(map_fd);
  1267. if (prog_array_fd >= 0)
  1268. close(prog_array_fd);
  1269. close(prog_fd);
  1270. }
  1271. printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
  1272. return 0;
  1273. }
  1274. int main(void)
  1275. {
  1276. struct rlimit r = {1 << 20, 1 << 20};
  1277. setrlimit(RLIMIT_MEMLOCK, &r);
  1278. return test();
  1279. }