/* trace_events_filter.c */
  1. /*
  2. * trace_events_filter - generic event filtering
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17. *
  18. * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
  19. */
  20. #include <linux/module.h>
  21. #include <linux/ctype.h>
  22. #include <linux/mutex.h>
  23. #include <linux/perf_event.h>
  24. #include <linux/slab.h>
  25. #include "trace.h"
  26. #include "trace_output.h"
/* Placeholder text shown for a subsystem filter when no filter is set. */
#define DEFAULT_SYS_FILTER_MESSAGE \
	"### global filter ###\n" \
	"# Use this to set filters for multiple events.\n" \
	"# Only events with the given fields will be affected.\n" \
	"# If no events are modified, an error message will be displayed here"
/*
 * Operator ids understood by the filter parser.
 * NOTE: filter_ops[] below is indexed by these values and must stay
 * in exactly the same order.
 */
enum filter_op_ids
{
	OP_OR,
	OP_AND,
	OP_GLOB,
	OP_NE,
	OP_EQ,
	OP_LT,
	OP_LE,
	OP_GT,
	OP_GE,
	OP_BAND,
	OP_NOT,
	OP_NONE,
	OP_OPEN_PAREN,
};

/* One parser operator: its id, token text, and binding precedence. */
struct filter_op {
	int id;
	char *string;
	int precedence;
};

/* Order must be the same as enum filter_op_ids above */
static struct filter_op filter_ops[] = {
	{ OP_OR,	"||",		1 },
	{ OP_AND,	"&&",		2 },
	{ OP_GLOB,	"~",		4 },
	{ OP_NE,	"!=",		4 },
	{ OP_EQ,	"==",		4 },
	{ OP_LT,	"<",		5 },
	{ OP_LE,	"<=",		5 },
	{ OP_GT,	">",		5 },
	{ OP_GE,	">=",		5 },
	{ OP_BAND,	"&",		6 },
	{ OP_NOT,	"!",		6 },
	{ OP_NONE,	"OP_NONE",	0 },
	{ OP_OPEN_PAREN, "(",		0 },
};
/* Filter parse error codes; each indexes its message in err_text[] below. */
enum {
	FILT_ERR_NONE,
	FILT_ERR_INVALID_OP,
	FILT_ERR_UNBALANCED_PAREN,
	FILT_ERR_TOO_MANY_OPERANDS,
	FILT_ERR_OPERAND_TOO_LONG,
	FILT_ERR_FIELD_NOT_FOUND,
	FILT_ERR_ILLEGAL_FIELD_OP,
	FILT_ERR_ILLEGAL_INTVAL,
	FILT_ERR_BAD_SUBSYS_FILTER,
	FILT_ERR_TOO_MANY_PREDS,
	FILT_ERR_MISSING_FIELD,
	FILT_ERR_INVALID_FILTER,
	FILT_ERR_IP_FIELD_ONLY,
	FILT_ERR_ILLEGAL_NOT_OP,
};

/* Human-readable messages, one per FILT_ERR_* value above (same order). */
static char *err_text[] = {
	"No error",
	"Invalid operator",
	"Unbalanced parens",
	"Too many operands",
	"Operand too long",
	"Field not found",
	"Illegal operation for field type",
	"Illegal integer value",
	"Couldn't find or set field in one of a subsystem's events",
	"Too many terms in predicate expression",
	"Missing field name and/or value",
	"Meaningless filter expression",
	"Only 'ip' field is supported for function trace",
	"Illegal use of '!'",
};
/* Operator stack entry used while converting infix input to postfix. */
struct opstack_op {
	int op;
	struct list_head list;
};

/* One element of the postfix (RPN) output: an operator or an operand. */
struct postfix_elt {
	int op;
	char *operand;
	struct list_head list;
};

/* Scratch state for parsing a single filter string. */
struct filter_parse_state {
	struct filter_op *ops;		/* operator table (filter_ops) */
	struct list_head opstack;	/* pending operators (opstack_op) */
	struct list_head postfix;	/* resulting postfix expression */
	int lasterr;			/* last FILT_ERR_* code */
	int lasterr_pos;		/* position in input of last error */

	/* the raw infix input being consumed */
	struct {
		char *string;
		unsigned int cnt;
		unsigned int tail;
	} infix;

	/* the operand currently being accumulated */
	struct {
		char string[MAX_FILTER_STR_VAL];
		int pos;
		unsigned int tail;
	} operand;
};

/* LIFO of predicates used while rebuilding the tree from postfix form. */
struct pred_stack {
	struct filter_pred **preds;
	int index;
};
/*
 * Generate a predicate callback comparing an integer event field of the
 * given C type against pred->val, honoring pred->op and pred->not.
 */
/* If not of not match is equal to not of not, then it is a match */
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = 0;							\
									\
	switch (pred->op) {						\
	case OP_LT:							\
		match = (*addr < val);					\
		break;							\
	case OP_LE:							\
		match = (*addr <= val);					\
		break;							\
	case OP_GT:							\
		match = (*addr > val);					\
		break;							\
	case OP_GE:							\
		match = (*addr >= val);					\
		break;							\
	case OP_BAND:							\
		match = (*addr & val);					\
		break;							\
	default:							\
		break;							\
	}								\
									\
	return !!match == !pred->not;					\
}

/* Generate an equality-only predicate for a @size-bit integer field. */
#define DEFINE_EQUALITY_PRED(size)					\
static int filter_pred_##size(struct filter_pred *pred, void *event)	\
{									\
	u##size *addr = (u##size *)(event + pred->offset);		\
	u##size val = (u##size)pred->val;				\
	int match;							\
									\
	match = (val == *addr) ^ pred->not;				\
									\
	return match;							\
}

/* Instantiate the helpers for every supported integer field width. */
DEFINE_COMPARISON_PRED(s64);
DEFINE_COMPARISON_PRED(u64);
DEFINE_COMPARISON_PRED(s32);
DEFINE_COMPARISON_PRED(u32);
DEFINE_COMPARISON_PRED(s16);
DEFINE_COMPARISON_PRED(u16);
DEFINE_COMPARISON_PRED(s8);
DEFINE_COMPARISON_PRED(u8);

DEFINE_EQUALITY_PRED(64);
DEFINE_EQUALITY_PRED(32);
DEFINE_EQUALITY_PRED(16);
DEFINE_EQUALITY_PRED(8);
  184. /* Filter predicate for fixed sized arrays of characters */
  185. static int filter_pred_string(struct filter_pred *pred, void *event)
  186. {
  187. char *addr = (char *)(event + pred->offset);
  188. int cmp, match;
  189. cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
  190. match = cmp ^ pred->not;
  191. return match;
  192. }
  193. /* Filter predicate for char * pointers */
  194. static int filter_pred_pchar(struct filter_pred *pred, void *event)
  195. {
  196. char **addr = (char **)(event + pred->offset);
  197. int cmp, match;
  198. int len = strlen(*addr) + 1; /* including tailing '\0' */
  199. cmp = pred->regex.match(*addr, &pred->regex, len);
  200. match = cmp ^ pred->not;
  201. return match;
  202. }
  203. /*
  204. * Filter predicate for dynamic sized arrays of characters.
  205. * These are implemented through a list of strings at the end
  206. * of the entry.
  207. * Also each of these strings have a field in the entry which
  208. * contains its offset from the beginning of the entry.
  209. * We have then first to get this field, dereference it
  210. * and add it to the address of the entry, and at last we have
  211. * the address of the string.
  212. */
  213. static int filter_pred_strloc(struct filter_pred *pred, void *event)
  214. {
  215. u32 str_item = *(u32 *)(event + pred->offset);
  216. int str_loc = str_item & 0xffff;
  217. int str_len = str_item >> 16;
  218. char *addr = (char *)(event + str_loc);
  219. int cmp, match;
  220. cmp = pred->regex.match(addr, &pred->regex, str_len);
  221. match = cmp ^ pred->not;
  222. return match;
  223. }
  224. /* Filter predicate for CPUs. */
  225. static int filter_pred_cpu(struct filter_pred *pred, void *event)
  226. {
  227. int cpu, cmp;
  228. int match = 0;
  229. cpu = raw_smp_processor_id();
  230. cmp = pred->val;
  231. switch (pred->op) {
  232. case OP_EQ:
  233. match = cpu == cmp;
  234. break;
  235. case OP_LT:
  236. match = cpu < cmp;
  237. break;
  238. case OP_LE:
  239. match = cpu <= cmp;
  240. break;
  241. case OP_GT:
  242. match = cpu > cmp;
  243. break;
  244. case OP_GE:
  245. match = cpu >= cmp;
  246. break;
  247. default:
  248. break;
  249. }
  250. return !!match == !pred->not;
  251. }
  252. /* Filter predicate for COMM. */
  253. static int filter_pred_comm(struct filter_pred *pred, void *event)
  254. {
  255. int cmp, match;
  256. cmp = pred->regex.match(current->comm, &pred->regex,
  257. pred->regex.field_len);
  258. match = cmp ^ pred->not;
  259. return match;
  260. }
  261. static int filter_pred_none(struct filter_pred *pred, void *event)
  262. {
  263. return 0;
  264. }
  265. /*
  266. * regex_match_foo - Basic regex callbacks
  267. *
  268. * @str: the string to be searched
  269. * @r: the regex structure containing the pattern string
  270. * @len: the length of the string to be searched (including '\0')
  271. *
  272. * Note:
  273. * - @str might not be NULL-terminated if it's of type DYN_STRING
  274. * or STATIC_STRING
  275. */
  276. static int regex_match_full(char *str, struct regex *r, int len)
  277. {
  278. if (strncmp(str, r->pattern, len) == 0)
  279. return 1;
  280. return 0;
  281. }
  282. static int regex_match_front(char *str, struct regex *r, int len)
  283. {
  284. if (len < r->len)
  285. return 0;
  286. if (strncmp(str, r->pattern, r->len) == 0)
  287. return 1;
  288. return 0;
  289. }
  290. static int regex_match_middle(char *str, struct regex *r, int len)
  291. {
  292. if (strnstr(str, r->pattern, len))
  293. return 1;
  294. return 0;
  295. }
  296. static int regex_match_end(char *str, struct regex *r, int len)
  297. {
  298. int strlen = len - 1;
  299. if (strlen >= r->len &&
  300. memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
  301. return 1;
  302. return 0;
  303. }
/**
 * filter_parse_regex - parse a basic regex
 * @buff: the raw regex
 * @len: length of the regex
 * @search: will point to the beginning of the string to compare
 * @not: tell whether the match will have to be inverted
 *
 * This passes in a buffer containing a regex and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 * search returns the pointer to use for comparison.
 * not returns 1 if buff started with a '!'
 * 0 otherwise.
 */
enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	/* A leading '!' negates the whole match and is skipped over. */
	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				/* Leading '*': match the end of the string. */
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				/*
				 * A later '*': "*foo*" becomes a middle
				 * match, "foo*" a front match.  The '*' is
				 * overwritten with NUL to terminate the
				 * search string; anything after it is
				 * ignored.
				 */
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
/*
 * Pick the regex match callback for @pred.  Only the '~' (OP_GLOB)
 * operator interprets wildcards; every other string op is a full match.
 */
static void filter_build_regex(struct filter_pred *pred)
{
	struct regex *r = &pred->regex;
	char *search;
	enum regex_type type = MATCH_FULL;
	int not = 0;

	if (pred->op == OP_GLOB) {
		type = filter_parse_regex(r->pattern, r->len, &search, &not);
		/* Slide the stripped search string to the pattern start. */
		r->len = strlen(search);
		memmove(r->pattern, search, r->len+1);
	}

	switch (type) {
	case MATCH_FULL:
		r->match = regex_match_full;
		break;
	case MATCH_FRONT_ONLY:
		r->match = regex_match_front;
		break;
	case MATCH_MIDDLE_ONLY:
		r->match = regex_match_middle;
		break;
	case MATCH_END_ONLY:
		r->match = regex_match_end;
		break;
	}

	/* A '!' prefix in the pattern flips the predicate's negation. */
	pred->not ^= not;
}
/* Direction of travel while walking the predicate tree. */
enum move_type {
	MOVE_DOWN,		/* descending into a child */
	MOVE_UP_FROM_LEFT,	/* returning from the left child */
	MOVE_UP_FROM_RIGHT	/* returning from the right child */
};
  381. static struct filter_pred *
  382. get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
  383. int index, enum move_type *move)
  384. {
  385. if (pred->parent & FILTER_PRED_IS_RIGHT)
  386. *move = MOVE_UP_FROM_RIGHT;
  387. else
  388. *move = MOVE_UP_FROM_LEFT;
  389. pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];
  390. return pred;
  391. }
/* Return values a walk callback may use to steer walk_pred_tree(). */
enum walk_return {
	WALK_PRED_ABORT,	/* stop the walk, propagate *err */
	WALK_PRED_PARENT,	/* skip this subtree, go to the parent */
	WALK_PRED_DEFAULT,	/* continue the normal traversal */
};

/* Callback invoked at every visit of a predicate during a tree walk. */
typedef int (*filter_pred_walkcb_t) (enum move_type move,
				     struct filter_pred *pred,
				     int *err, void *data);
/*
 * Iteratively walk the predicate tree rooted at @root, calling @cb on
 * every visit (down, up-from-left, up-from-right).  No recursion is
 * used; parent links stored in each pred are followed back up.  The
 * callback can abort the walk (WALK_PRED_ABORT returns its *err) or
 * jump straight to the parent (WALK_PRED_PARENT).
 */
static int walk_pred_tree(struct filter_pred *preds,
			  struct filter_pred *root,
			  filter_pred_walkcb_t cb, void *data)
{
	struct filter_pred *pred = root;
	enum move_type move = MOVE_DOWN;
	int done = 0;

	if  (!preds)
		return -EINVAL;

	do {
		int err = 0, ret;

		ret = cb(move, pred, &err, data);
		if (ret == WALK_PRED_ABORT)
			return err;
		if (ret == WALK_PRED_PARENT)
			goto get_parent;

		switch (move) {
		case MOVE_DOWN:
			/* Leaves have no children: climb back up. */
			if (pred->left != FILTER_PRED_INVALID) {
				pred = &preds[pred->left];
				continue;
			}
			goto get_parent;
		case MOVE_UP_FROM_LEFT:
			/* Left subtree finished: descend into the right. */
			pred = &preds[pred->right];
			move = MOVE_DOWN;
			continue;
		case MOVE_UP_FROM_RIGHT:
 get_parent:
			/* Back at the root means the walk is complete. */
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent,
					       &move);
			continue;
		}
		done = 1;
	} while (!done);

	/* We are fine. */
	return 0;
}
/*
 * A series of AND or ORs where found together. Instead of
 * climbing up and down the tree branches, an array of the
 * ops were made in order of checks. We can just move across
 * the array and short circuit if needed.
 */
static int process_ops(struct filter_pred *preds,
		       struct filter_pred *op, void *rec)
{
	struct filter_pred *pred;
	int match = 0;
	int type;
	int i;

	/*
	 * Micro-optimization: We set type to true if op
	 * is an OR and false otherwise (AND). Then we
	 * just need to test if the match is equal to
	 * the type, and if it is, we can short circuit the
	 * rest of the checks:
	 *
	 * if ((match && op->op == OP_OR) ||
	 *     (!match && op->op == OP_AND))
	 *	return match;
	 */
	type = op->op == OP_OR;

	/* op->val holds the number of folded children in op->ops[]. */
	for (i = 0; i < op->val; i++) {
		pred = &preds[op->ops[i]];
		if (!WARN_ON_ONCE(!pred->fn))
			match = pred->fn(pred, rec);
		if (!!match == type)
			break;
	}
	/* If not of not match is equal to not of not, then it is a match */
	return !!match == !op->not;
}
/* Per-walk context handed to filter_match_preds_cb() via @data. */
struct filter_match_preds_data {
	struct filter_pred *preds;	/* flat predicate array */
	int match;			/* running match result */
	void *rec;			/* the event record being tested */
};
/*
 * walk_pred_tree() callback that evaluates the filter against d->rec,
 * short-circuiting AND/OR subtrees whenever the left side already
 * decides the result.
 */
static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
				 int *err, void *data)
{
	struct filter_match_preds_data *d = data;

	*err = 0;
	switch (move) {
	case MOVE_DOWN:
		/* only AND and OR have children */
		if (pred->left != FILTER_PRED_INVALID) {
			/* If ops is set, then it was folded. */
			if (!pred->ops)
				return WALK_PRED_DEFAULT;
			/* We can treat folded ops as a leaf node */
			d->match = process_ops(d->preds, pred, d->rec);
		} else {
			if (!WARN_ON_ONCE(!pred->fn))
				d->match = pred->fn(pred, d->rec);
		}
		/* Either way this node is fully evaluated: go back up. */
		return WALK_PRED_PARENT;
	case MOVE_UP_FROM_LEFT:
		/*
		 * Check for short circuits.
		 *
		 * Optimization: !!match == (pred->op == OP_OR)
		 *   is the same as:
		 * if ((match && pred->op == OP_OR) ||
		 *     (!match && pred->op == OP_AND))
		 */
		if (!!d->match == (pred->op == OP_OR))
			return WALK_PRED_PARENT;
		break;
	case MOVE_UP_FROM_RIGHT:
		break;
	}

	return WALK_PRED_DEFAULT;
}
/* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct event_filter *filter, void *rec)
{
	struct filter_pred *preds;
	struct filter_pred *root;
	struct filter_match_preds_data data = {
		/* match is currently meaningless */
		.match = -1,
		.rec   = rec,
	};
	int n_preds, ret;

	/* no filter is considered a match */
	if (!filter)
		return 1;

	n_preds = filter->n_preds;
	if (!n_preds)
		return 1;

	/*
	 * n_preds, root and filter->preds are protect with preemption disabled.
	 */
	root = rcu_dereference_sched(filter->root);
	if (!root)
		return 1;

	data.preds = preds = rcu_dereference_sched(filter->preds);
	ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
	/* The walk cannot fail here: preds was checked non-NULL above. */
	WARN_ON(ret);
	return data.match;
}
EXPORT_SYMBOL_GPL(filter_match_preds);
  546. static void parse_error(struct filter_parse_state *ps, int err, int pos)
  547. {
  548. ps->lasterr = err;
  549. ps->lasterr_pos = pos;
  550. }
/* Free and clear the saved textual form of @filter; NULL-safe. */
static void remove_filter_string(struct event_filter *filter)
{
	if (!filter)
		return;

	kfree(filter->filter_string);
	filter->filter_string = NULL;
}
  558. static int replace_filter_string(struct event_filter *filter,
  559. char *filter_string)
  560. {
  561. kfree(filter->filter_string);
  562. filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
  563. if (!filter->filter_string)
  564. return -ENOMEM;
  565. return 0;
  566. }
  567. static int append_filter_string(struct event_filter *filter,
  568. char *string)
  569. {
  570. int newlen;
  571. char *new_filter_string;
  572. BUG_ON(!filter->filter_string);
  573. newlen = strlen(filter->filter_string) + strlen(string) + 1;
  574. new_filter_string = kmalloc(newlen, GFP_KERNEL);
  575. if (!new_filter_string)
  576. return -ENOMEM;
  577. strcpy(new_filter_string, filter->filter_string);
  578. strcat(new_filter_string, string);
  579. kfree(filter->filter_string);
  580. filter->filter_string = new_filter_string;
  581. return 0;
  582. }
  583. static void append_filter_err(struct filter_parse_state *ps,
  584. struct event_filter *filter)
  585. {
  586. int pos = ps->lasterr_pos;
  587. char *buf, *pbuf;
  588. buf = (char *)__get_free_page(GFP_TEMPORARY);
  589. if (!buf)
  590. return;
  591. append_filter_string(filter, "\n");
  592. memset(buf, ' ', PAGE_SIZE);
  593. if (pos > PAGE_SIZE - 128)
  594. pos = 0;
  595. buf[pos] = '^';
  596. pbuf = &buf[pos] + 1;
  597. sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
  598. append_filter_string(filter, buf);
  599. free_page((unsigned long) buf);
  600. }
  601. static inline struct event_filter *event_filter(struct trace_event_file *file)
  602. {
  603. if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
  604. return file->event_call->filter;
  605. else
  606. return file->filter;
  607. }
  608. /* caller must hold event_mutex */
  609. void print_event_filter(struct trace_event_file *file, struct trace_seq *s)
  610. {
  611. struct event_filter *filter = event_filter(file);
  612. if (filter && filter->filter_string)
  613. trace_seq_printf(s, "%s\n", filter->filter_string);
  614. else
  615. trace_seq_puts(s, "none\n");
  616. }
  617. void print_subsystem_event_filter(struct event_subsystem *system,
  618. struct trace_seq *s)
  619. {
  620. struct event_filter *filter;
  621. mutex_lock(&event_mutex);
  622. filter = system->filter;
  623. if (filter && filter->filter_string)
  624. trace_seq_printf(s, "%s\n", filter->filter_string);
  625. else
  626. trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
  627. mutex_unlock(&event_mutex);
  628. }
/*
 * Allocate a pred stack able to hold @n_preds entries.  One extra,
 * zeroed slot sits at the top and acts as a NULL sentinel for
 * __pop_pred_stack() on an empty stack.
 */
static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
{
	stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
	if (!stack->preds)
		return -ENOMEM;
	stack->index = n_preds;
	return 0;
}
/* Release the pred stack storage and reset its index. */
static void __free_pred_stack(struct pred_stack *stack)
{
	kfree(stack->preds);
	stack->index = 0;
}
  642. static int __push_pred_stack(struct pred_stack *stack,
  643. struct filter_pred *pred)
  644. {
  645. int index = stack->index;
  646. if (WARN_ON(index == 0))
  647. return -ENOSPC;
  648. stack->preds[--index] = pred;
  649. stack->index = index;
  650. return 0;
  651. }
  652. static struct filter_pred *
  653. __pop_pred_stack(struct pred_stack *stack)
  654. {
  655. struct filter_pred *pred;
  656. int index = stack->index;
  657. pred = stack->preds[index++];
  658. if (!pred)
  659. return NULL;
  660. stack->index = index;
  661. return pred;
  662. }
/*
 * Install *src at slot @idx of filter->preds, wiring up the tree links.
 * For AND/OR nodes the two children are popped off @stack and linked;
 * the FILTER_PRED_FOLD bit carried in index marks subtrees that can
 * later be flattened into an ops[] array (see process_ops()).
 */
static int filter_set_pred(struct event_filter *filter,
			   int idx,
			   struct pred_stack *stack,
			   struct filter_pred *src)
{
	struct filter_pred *dest = &filter->preds[idx];
	struct filter_pred *left;
	struct filter_pred *right;

	*dest = *src;
	dest->index = idx;

	if (dest->op == OP_OR || dest->op == OP_AND) {
		right = __pop_pred_stack(stack);
		left = __pop_pred_stack(stack);
		if (!left || !right)
			return -EINVAL;
		/*
		 * If both children can be folded
		 * and they are the same op as this op or a leaf,
		 * then this op can be folded.
		 */
		if (left->index & FILTER_PRED_FOLD &&
		    ((left->op == dest->op && !left->not) ||
		     left->left == FILTER_PRED_INVALID) &&
		    right->index & FILTER_PRED_FOLD &&
		    ((right->op == dest->op && !right->not) ||
		     right->left == FILTER_PRED_INVALID))
			dest->index |= FILTER_PRED_FOLD;

		/* Child links strip the fold bit; parent links keep side info. */
		dest->left = left->index & ~FILTER_PRED_FOLD;
		dest->right = right->index & ~FILTER_PRED_FOLD;
		left->parent = dest->index & ~FILTER_PRED_FOLD;
		right->parent = dest->index | FILTER_PRED_IS_RIGHT;
	} else {
		/*
		 * Make dest->left invalid to be used as a quick
		 * way to know this is a leaf node.
		 */
		dest->left = FILTER_PRED_INVALID;

		/* All leafs allow folding the parent ops. */
		dest->index |= FILTER_PRED_FOLD;
	}

	return __push_pred_stack(stack, dest);
}
/* Free the predicate array, including any folded ops[] arrays. */
static void __free_preds(struct event_filter *filter)
{
	int i;

	if (filter->preds) {
		/* Each pred may own a folded-ops index array. */
		for (i = 0; i < filter->n_preds; i++)
			kfree(filter->preds[i].ops);
		kfree(filter->preds);
		filter->preds = NULL;
	}
	filter->a_preds = 0;
	filter->n_preds = 0;
}
/*
 * Clear the FILTERED flag for @file.  Depending on the event's mode,
 * the flag lives either on the trace_event_call or on the file itself.
 */
static void filter_disable(struct trace_event_file *file)
{
	struct trace_event_call *call = file->event_call;

	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
		call->flags &= ~TRACE_EVENT_FL_FILTERED;
	else
		file->flags &= ~EVENT_FILE_FL_FILTERED;
}
/* Free a filter and everything it owns; NULL-safe. */
static void __free_filter(struct event_filter *filter)
{
	if (!filter)
		return;

	/* Members must go before the struct itself. */
	__free_preds(filter);
	kfree(filter->filter_string);
	kfree(filter);
}
/* Public wrapper around __free_filter(); safe to call with NULL. */
void free_event_filter(struct event_filter *filter)
{
	__free_filter(filter);
}
  737. static struct event_filter *__alloc_filter(void)
  738. {
  739. struct event_filter *filter;
  740. filter = kzalloc(sizeof(*filter), GFP_KERNEL);
  741. return filter;
  742. }
  743. static int __alloc_preds(struct event_filter *filter, int n_preds)
  744. {
  745. struct filter_pred *pred;
  746. int i;
  747. if (filter->preds)
  748. __free_preds(filter);
  749. filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
  750. if (!filter->preds)
  751. return -ENOMEM;
  752. filter->a_preds = n_preds;
  753. filter->n_preds = 0;
  754. for (i = 0; i < n_preds; i++) {
  755. pred = &filter->preds[i];
  756. pred->fn = filter_pred_none;
  757. }
  758. return 0;
  759. }
  760. static inline void __remove_filter(struct trace_event_file *file)
  761. {
  762. struct trace_event_call *call = file->event_call;
  763. filter_disable(file);
  764. if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
  765. remove_filter_string(call->filter);
  766. else
  767. remove_filter_string(file->filter);
  768. }
  769. static void filter_free_subsystem_preds(struct trace_subsystem_dir *dir,
  770. struct trace_array *tr)
  771. {
  772. struct trace_event_file *file;
  773. list_for_each_entry(file, &tr->events, list) {
  774. if (file->system != dir)
  775. continue;
  776. __remove_filter(file);
  777. }
  778. }
  779. static inline void __free_subsystem_filter(struct trace_event_file *file)
  780. {
  781. struct trace_event_call *call = file->event_call;
  782. if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) {
  783. __free_filter(call->filter);
  784. call->filter = NULL;
  785. } else {
  786. __free_filter(file->filter);
  787. file->filter = NULL;
  788. }
  789. }
  790. static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir,
  791. struct trace_array *tr)
  792. {
  793. struct trace_event_file *file;
  794. list_for_each_entry(file, &tr->events, list) {
  795. if (file->system != dir)
  796. continue;
  797. __free_subsystem_filter(file);
  798. }
  799. }
  800. static int filter_add_pred(struct filter_parse_state *ps,
  801. struct event_filter *filter,
  802. struct filter_pred *pred,
  803. struct pred_stack *stack)
  804. {
  805. int err;
  806. if (WARN_ON(filter->n_preds == filter->a_preds)) {
  807. parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
  808. return -ENOSPC;
  809. }
  810. err = filter_set_pred(filter, filter->n_preds, stack, pred);
  811. if (err)
  812. return err;
  813. filter->n_preds++;
  814. return 0;
  815. }
  816. int filter_assign_type(const char *type)
  817. {
  818. if (strstr(type, "__data_loc") && strstr(type, "char"))
  819. return FILTER_DYN_STRING;
  820. if (strchr(type, '[') && strstr(type, "char"))
  821. return FILTER_STATIC_STRING;
  822. return FILTER_OTHER;
  823. }
  824. static bool is_function_field(struct ftrace_event_field *field)
  825. {
  826. return field->filter_type == FILTER_TRACE_FN;
  827. }
  828. static bool is_string_field(struct ftrace_event_field *field)
  829. {
  830. return field->filter_type == FILTER_DYN_STRING ||
  831. field->filter_type == FILTER_STATIC_STRING ||
  832. field->filter_type == FILTER_PTR_STRING;
  833. }
  834. static bool is_legal_op(struct ftrace_event_field *field, int op)
  835. {
  836. if (is_string_field(field) &&
  837. (op != OP_EQ && op != OP_NE && op != OP_GLOB))
  838. return false;
  839. if (!is_string_field(field) && op == OP_GLOB)
  840. return false;
  841. return true;
  842. }
  843. static filter_pred_fn_t select_comparison_fn(int op, int field_size,
  844. int field_is_signed)
  845. {
  846. filter_pred_fn_t fn = NULL;
  847. switch (field_size) {
  848. case 8:
  849. if (op == OP_EQ || op == OP_NE)
  850. fn = filter_pred_64;
  851. else if (field_is_signed)
  852. fn = filter_pred_s64;
  853. else
  854. fn = filter_pred_u64;
  855. break;
  856. case 4:
  857. if (op == OP_EQ || op == OP_NE)
  858. fn = filter_pred_32;
  859. else if (field_is_signed)
  860. fn = filter_pred_s32;
  861. else
  862. fn = filter_pred_u32;
  863. break;
  864. case 2:
  865. if (op == OP_EQ || op == OP_NE)
  866. fn = filter_pred_16;
  867. else if (field_is_signed)
  868. fn = filter_pred_s16;
  869. else
  870. fn = filter_pred_u16;
  871. break;
  872. case 1:
  873. if (op == OP_EQ || op == OP_NE)
  874. fn = filter_pred_8;
  875. else if (field_is_signed)
  876. fn = filter_pred_s8;
  877. else
  878. fn = filter_pred_u8;
  879. break;
  880. }
  881. return fn;
  882. }
/*
 * Finish setting up a comparison predicate on @field: choose the
 * predicate function matching the field's filter type/size and, for
 * numeric fields, parse the value string.  Returns 0, or -EINVAL with
 * a parse error recorded in @ps.
 */
static int init_pred(struct filter_parse_state *ps,
		     struct ftrace_event_field *field,
		     struct filter_pred *pred)
{
	filter_pred_fn_t fn = filter_pred_none;
	unsigned long long val;
	int ret;

	pred->offset = field->offset;

	if (!is_legal_op(field, pred->op)) {
		parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
		return -EINVAL;
	}

	if (field->filter_type == FILTER_COMM) {
		/* comm is matched as a fixed-size string. */
		filter_build_regex(pred);
		fn = filter_pred_comm;
		pred->regex.field_len = TASK_COMM_LEN;
	} else if (is_string_field(field)) {
		filter_build_regex(pred);

		if (field->filter_type == FILTER_STATIC_STRING) {
			fn = filter_pred_string;
			pred->regex.field_len = field->size;
		} else if (field->filter_type == FILTER_DYN_STRING)
			fn = filter_pred_strloc;
		else
			fn = filter_pred_pchar;
	} else if (is_function_field(field)) {
		/* Function filtering is only supported on the "ip" field. */
		if (strcmp(field->name, "ip")) {
			parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
			return -EINVAL;
		}
	} else {
		/* Numeric field: parse the pattern as an integer literal. */
		if (field->is_signed)
			ret = kstrtoll(pred->regex.pattern, 0, &val);
		else
			ret = kstrtoull(pred->regex.pattern, 0, &val);
		if (ret) {
			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
			return -EINVAL;
		}
		pred->val = val;

		if (field->filter_type == FILTER_CPU)
			fn = filter_pred_cpu;
		else
			fn = select_comparison_fn(pred->op, field->size,
						  field->is_signed);
		if (!fn) {
			parse_error(ps, FILT_ERR_INVALID_OP, 0);
			return -EINVAL;
		}
	}

	/* "!=" is implemented as "==" with the result negated. */
	if (pred->op == OP_NE)
		pred->not ^= 1;

	pred->fn = fn;
	return 0;
}
  938. static void parse_init(struct filter_parse_state *ps,
  939. struct filter_op *ops,
  940. char *infix_string)
  941. {
  942. memset(ps, '\0', sizeof(*ps));
  943. ps->infix.string = infix_string;
  944. ps->infix.cnt = strlen(infix_string);
  945. ps->ops = ops;
  946. INIT_LIST_HEAD(&ps->opstack);
  947. INIT_LIST_HEAD(&ps->postfix);
  948. }
  949. static char infix_next(struct filter_parse_state *ps)
  950. {
  951. if (!ps->infix.cnt)
  952. return 0;
  953. ps->infix.cnt--;
  954. return ps->infix.string[ps->infix.tail++];
  955. }
  956. static char infix_peek(struct filter_parse_state *ps)
  957. {
  958. if (ps->infix.tail == strlen(ps->infix.string))
  959. return 0;
  960. return ps->infix.string[ps->infix.tail];
  961. }
  962. static void infix_advance(struct filter_parse_state *ps)
  963. {
  964. if (!ps->infix.cnt)
  965. return;
  966. ps->infix.cnt--;
  967. ps->infix.tail++;
  968. }
  969. static inline int is_precedence_lower(struct filter_parse_state *ps,
  970. int a, int b)
  971. {
  972. return ps->ops[a].precedence < ps->ops[b].precedence;
  973. }
  974. static inline int is_op_char(struct filter_parse_state *ps, char c)
  975. {
  976. int i;
  977. for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
  978. if (ps->ops[i].string[0] == c)
  979. return 1;
  980. }
  981. return 0;
  982. }
  983. static int infix_get_op(struct filter_parse_state *ps, char firstc)
  984. {
  985. char nextc = infix_peek(ps);
  986. char opstr[3];
  987. int i;
  988. opstr[0] = firstc;
  989. opstr[1] = nextc;
  990. opstr[2] = '\0';
  991. for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
  992. if (!strcmp(opstr, ps->ops[i].string)) {
  993. infix_advance(ps);
  994. return ps->ops[i].id;
  995. }
  996. }
  997. opstr[1] = '\0';
  998. for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
  999. if (!strcmp(opstr, ps->ops[i].string))
  1000. return ps->ops[i].id;
  1001. }
  1002. return OP_NONE;
  1003. }
  1004. static inline void clear_operand_string(struct filter_parse_state *ps)
  1005. {
  1006. memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
  1007. ps->operand.tail = 0;
  1008. }
  1009. static inline int append_operand_char(struct filter_parse_state *ps, char c)
  1010. {
  1011. if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
  1012. return -EINVAL;
  1013. ps->operand.string[ps->operand.tail++] = c;
  1014. return 0;
  1015. }
  1016. static int filter_opstack_push(struct filter_parse_state *ps, int op)
  1017. {
  1018. struct opstack_op *opstack_op;
  1019. opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
  1020. if (!opstack_op)
  1021. return -ENOMEM;
  1022. opstack_op->op = op;
  1023. list_add(&opstack_op->list, &ps->opstack);
  1024. return 0;
  1025. }
  1026. static int filter_opstack_empty(struct filter_parse_state *ps)
  1027. {
  1028. return list_empty(&ps->opstack);
  1029. }
  1030. static int filter_opstack_top(struct filter_parse_state *ps)
  1031. {
  1032. struct opstack_op *opstack_op;
  1033. if (filter_opstack_empty(ps))
  1034. return OP_NONE;
  1035. opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
  1036. return opstack_op->op;
  1037. }
  1038. static int filter_opstack_pop(struct filter_parse_state *ps)
  1039. {
  1040. struct opstack_op *opstack_op;
  1041. int op;
  1042. if (filter_opstack_empty(ps))
  1043. return OP_NONE;
  1044. opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
  1045. op = opstack_op->op;
  1046. list_del(&opstack_op->list);
  1047. kfree(opstack_op);
  1048. return op;
  1049. }
  1050. static void filter_opstack_clear(struct filter_parse_state *ps)
  1051. {
  1052. while (!filter_opstack_empty(ps))
  1053. filter_opstack_pop(ps);
  1054. }
/* Accessor for the operand currently being accumulated by the parser. */
static char *curr_operand(struct filter_parse_state *ps)
{
	return ps->operand.string;
}
  1059. static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
  1060. {
  1061. struct postfix_elt *elt;
  1062. elt = kmalloc(sizeof(*elt), GFP_KERNEL);
  1063. if (!elt)
  1064. return -ENOMEM;
  1065. elt->op = OP_NONE;
  1066. elt->operand = kstrdup(operand, GFP_KERNEL);
  1067. if (!elt->operand) {
  1068. kfree(elt);
  1069. return -ENOMEM;
  1070. }
  1071. list_add_tail(&elt->list, &ps->postfix);
  1072. return 0;
  1073. }
  1074. static int postfix_append_op(struct filter_parse_state *ps, int op)
  1075. {
  1076. struct postfix_elt *elt;
  1077. elt = kmalloc(sizeof(*elt), GFP_KERNEL);
  1078. if (!elt)
  1079. return -ENOMEM;
  1080. elt->op = op;
  1081. elt->operand = NULL;
  1082. list_add_tail(&elt->list, &ps->postfix);
  1083. return 0;
  1084. }
  1085. static void postfix_clear(struct filter_parse_state *ps)
  1086. {
  1087. struct postfix_elt *elt;
  1088. while (!list_empty(&ps->postfix)) {
  1089. elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
  1090. list_del(&elt->list);
  1091. kfree(elt->operand);
  1092. kfree(elt);
  1093. }
  1094. }
/*
 * Convert the infix filter string into postfix (RPN) form with a
 * shunting-yard style algorithm: operands go straight to the postfix
 * list, operators are held on ps->opstack until an operator of lower
 * precedence, a closing paren, or end of input flushes them.
 */
static int filter_parse(struct filter_parse_state *ps)
{
	int in_string = 0;
	int op, top_op;
	char ch;

	while ((ch = infix_next(ps))) {
		/* Quotes toggle string mode; the quote itself is dropped. */
		if (ch == '"') {
			in_string ^= 1;
			continue;
		}

		/* Inside quotes, every char (even ops/spaces) is operand text. */
		if (in_string)
			goto parse_operand;

		if (isspace(ch))
			continue;

		if (is_op_char(ps, ch)) {
			op = infix_get_op(ps, ch);
			if (op == OP_NONE) {
				parse_error(ps, FILT_ERR_INVALID_OP, 0);
				return -EINVAL;
			}

			/* Flush the operand accumulated so far, if any. */
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* Pop stacked ops of >= precedence before pushing. */
			while (!filter_opstack_empty(ps)) {
				top_op = filter_opstack_top(ps);
				if (!is_precedence_lower(ps, top_op, op)) {
					top_op = filter_opstack_pop(ps);
					postfix_append_op(ps, top_op);
					continue;
				}
				break;
			}

			filter_opstack_push(ps, op);
			continue;
		}

		if (ch == '(') {
			filter_opstack_push(ps, OP_OPEN_PAREN);
			continue;
		}

		if (ch == ')') {
			/* Flush the pending operand before closing the group. */
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* Pop operators until the matching open paren. */
			top_op = filter_opstack_pop(ps);
			while (top_op != OP_NONE) {
				if (top_op == OP_OPEN_PAREN)
					break;
				postfix_append_op(ps, top_op);
				top_op = filter_opstack_pop(ps);
			}
			/* Stack ran out without finding '(': unbalanced. */
			if (top_op == OP_NONE) {
				parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
				return -EINVAL;
			}
			continue;
		}
parse_operand:
		if (append_operand_char(ps, ch)) {
			parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
			return -EINVAL;
		}
	}

	/* End of input: flush the last operand and remaining operators. */
	if (strlen(curr_operand(ps)))
		postfix_append_operand(ps, curr_operand(ps));

	while (!filter_opstack_empty(ps)) {
		top_op = filter_opstack_pop(ps);
		if (top_op == OP_NONE)
			break;
		/* A leftover '(' means a close paren was missing. */
		if (top_op == OP_OPEN_PAREN) {
			parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
			return -EINVAL;
		}
		postfix_append_op(ps, top_op);
	}

	return 0;
}
  1173. static struct filter_pred *create_pred(struct filter_parse_state *ps,
  1174. struct trace_event_call *call,
  1175. int op, char *operand1, char *operand2)
  1176. {
  1177. struct ftrace_event_field *field;
  1178. static struct filter_pred pred;
  1179. memset(&pred, 0, sizeof(pred));
  1180. pred.op = op;
  1181. if (op == OP_AND || op == OP_OR)
  1182. return &pred;
  1183. if (!operand1 || !operand2) {
  1184. parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
  1185. return NULL;
  1186. }
  1187. field = trace_find_event_field(call, operand1);
  1188. if (!field) {
  1189. parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
  1190. return NULL;
  1191. }
  1192. strcpy(pred.regex.pattern, operand2);
  1193. pred.regex.len = strlen(pred.regex.pattern);
  1194. pred.field = field;
  1195. return init_pred(ps, field, &pred) ? NULL : &pred;
  1196. }
/*
 * Validate the postfix expression by simulating its evaluation depth:
 * each operand pushes one value (cnt++), each binary op consumes two
 * values and produces one (net cnt--), and unary OP_NOT is neutral.
 * A well-formed expression never underflows and ends at depth one.
 */
static int check_preds(struct filter_parse_state *ps)
{
	int n_normal_preds = 0, n_logical_preds = 0;
	struct postfix_elt *elt;
	int cnt = 0;

	list_for_each_entry(elt, &ps->postfix, list) {
		if (elt->op == OP_NONE) {
			/* An operand: one more value on the virtual stack. */
			cnt++;
			continue;
		}

		if (elt->op == OP_AND || elt->op == OP_OR) {
			n_logical_preds++;
			cnt--;
			continue;
		}
		if (elt->op != OP_NOT)
			cnt--;
		n_normal_preds++;
		/* all ops should have operands */
		if (cnt < 0)
			break;
	}

	/*
	 * Reject if the stack did not end at exactly one value, if there
	 * are no comparison predicates at all, or if logical ops outnumber
	 * comparisons (impossible for a well-formed expression).
	 */
	if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
		parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
		return -EINVAL;
	}

	return 0;
}
  1225. static int count_preds(struct filter_parse_state *ps)
  1226. {
  1227. struct postfix_elt *elt;
  1228. int n_preds = 0;
  1229. list_for_each_entry(elt, &ps->postfix, list) {
  1230. if (elt->op == OP_NONE)
  1231. continue;
  1232. n_preds++;
  1233. }
  1234. return n_preds;
  1235. }
/* State for check_pred_tree_cb(): visits seen vs. allowed maximum. */
struct check_pred_data {
	int count;
	int max;
};

/*
 * Walk callback: count every visit and abort once the walk exceeds the
 * precomputed maximum, which would indicate a malformed (looping) tree.
 */
static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
			      int *err, void *data)
{
	struct check_pred_data *d = data;

	if (WARN_ON(d->count++ > d->max)) {
		*err = -EINVAL;
		return WALK_PRED_ABORT;
	}
	return WALK_PRED_DEFAULT;
}
/*
 * The tree is walked at filtering of an event. If the tree is not correctly
 * built, it may cause an infinite loop. Check here that the tree does
 * indeed terminate.
 */
static int check_pred_tree(struct event_filter *filter,
			   struct filter_pred *root)
{
	struct check_pred_data data = {
		/*
		 * The max that we can hit a node is three times.
		 * Once going down, once coming up from left, and
		 * once coming up from right. This is more than enough
		 * since leafs are only hit a single time.
		 */
		.max = 3 * filter->n_preds,
		.count = 0,
	};

	/* Any overrun is reported through the callback's *err. */
	return walk_pred_tree(filter->preds, root,
			      check_pred_tree_cb, &data);
}
  1271. static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
  1272. int *err, void *data)
  1273. {
  1274. int *count = data;
  1275. if ((move == MOVE_DOWN) &&
  1276. (pred->left == FILTER_PRED_INVALID))
  1277. (*count)++;
  1278. return WALK_PRED_DEFAULT;
  1279. }
  1280. static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
  1281. {
  1282. int count = 0, ret;
  1283. ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
  1284. WARN_ON(ret);
  1285. return count;
  1286. }
/* State for fold_pred_cb(): the folded root and the fill of its ops[]. */
struct fold_pred_data {
	struct filter_pred *root;
	int count;
	int children;
};

/*
 * Walk callback for fold_pred(): record the index of every leaf under
 * the folded root into root->ops[], clearing each leaf's fold flag.
 */
static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
			int *err, void *data)
{
	struct fold_pred_data *d = data;
	struct filter_pred *root = d->root;

	/* Leaves are handled only when first reached going down. */
	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	if (pred->left != FILTER_PRED_INVALID)
		return WALK_PRED_DEFAULT;

	/* More leaves than counted up front means a corrupt tree. */
	if (WARN_ON(d->count == d->children)) {
		*err = -EINVAL;
		return WALK_PRED_ABORT;
	}

	pred->index &= ~FILTER_PRED_FOLD;
	root->ops[d->count++] = pred->index;
	return WALK_PRED_DEFAULT;
}
/*
 * Flatten the subtree under @root: allocate root->ops[] and fill it
 * with the indices of all leaf predicates so the whole group can be
 * evaluated as one array instead of walking the tree.
 */
static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
{
	struct fold_pred_data data = {
		.root = root,
		.count = 0,
	};
	int children;

	/* No need to keep the fold flag */
	root->index &= ~FILTER_PRED_FOLD;

	/* If the root is a leaf then do nothing */
	if (root->left == FILTER_PRED_INVALID)
		return 0;

	/* count the children */
	children = count_leafs(preds, &preds[root->left]);
	children += count_leafs(preds, &preds[root->right]);

	root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
	if (!root->ops)
		return -ENOMEM;

	/* val doubles as the ops[] length for a folded node. */
	root->val = children;
	data.children = children;
	return walk_pred_tree(preds, root, fold_pred_cb, &data);
}
/*
 * Walk callback for fold_pred_tree(): fold the topmost node that is
 * marked foldable, then skip its (now flattened) subtree.
 */
static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
			     int *err, void *data)
{
	struct filter_pred *preds = data;

	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	if (!(pred->index & FILTER_PRED_FOLD))
		return WALK_PRED_DEFAULT;

	*err = fold_pred(preds, pred);
	if (*err)
		return WALK_PRED_ABORT;

	/* everything below is folded, continue with parent */
	return WALK_PRED_PARENT;
}
/*
 * To optimize the processing of the ops, if we have several "ors" or
 * "ands" together, we can put them in an array and process them all
 * together speeding up the filter logic.
 */
static int fold_pred_tree(struct event_filter *filter,
			  struct filter_pred *root)
{
	/* The preds array doubles as the callback's private data. */
	return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
			      filter->preds);
}
  1356. static int replace_preds(struct trace_event_call *call,
  1357. struct event_filter *filter,
  1358. struct filter_parse_state *ps,
  1359. bool dry_run)
  1360. {
  1361. char *operand1 = NULL, *operand2 = NULL;
  1362. struct filter_pred *pred;
  1363. struct filter_pred *root;
  1364. struct postfix_elt *elt;
  1365. struct pred_stack stack = { }; /* init to NULL */
  1366. int err;
  1367. int n_preds = 0;
  1368. n_preds = count_preds(ps);
  1369. if (n_preds >= MAX_FILTER_PRED) {
  1370. parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
  1371. return -ENOSPC;
  1372. }
  1373. err = check_preds(ps);
  1374. if (err)
  1375. return err;
  1376. if (!dry_run) {
  1377. err = __alloc_pred_stack(&stack, n_preds);
  1378. if (err)
  1379. return err;
  1380. err = __alloc_preds(filter, n_preds);
  1381. if (err)
  1382. goto fail;
  1383. }
  1384. n_preds = 0;
  1385. list_for_each_entry(elt, &ps->postfix, list) {
  1386. if (elt->op == OP_NONE) {
  1387. if (!operand1)
  1388. operand1 = elt->operand;
  1389. else if (!operand2)
  1390. operand2 = elt->operand;
  1391. else {
  1392. parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
  1393. err = -EINVAL;
  1394. goto fail;
  1395. }
  1396. continue;
  1397. }
  1398. if (elt->op == OP_NOT) {
  1399. if (!n_preds || operand1 || operand2) {
  1400. parse_error(ps, FILT_ERR_ILLEGAL_NOT_OP, 0);
  1401. err = -EINVAL;
  1402. goto fail;
  1403. }
  1404. if (!dry_run)
  1405. filter->preds[n_preds - 1].not ^= 1;
  1406. continue;
  1407. }
  1408. if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
  1409. parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
  1410. err = -ENOSPC;
  1411. goto fail;
  1412. }
  1413. pred = create_pred(ps, call, elt->op, operand1, operand2);
  1414. if (!pred) {
  1415. err = -EINVAL;
  1416. goto fail;
  1417. }
  1418. if (!dry_run) {
  1419. err = filter_add_pred(ps, filter, pred, &stack);
  1420. if (err)
  1421. goto fail;
  1422. }
  1423. operand1 = operand2 = NULL;
  1424. }
  1425. if (!dry_run) {
  1426. /* We should have one item left on the stack */
  1427. pred = __pop_pred_stack(&stack);
  1428. if (!pred)
  1429. return -EINVAL;
  1430. /* This item is where we start from in matching */
  1431. root = pred;
  1432. /* Make sure the stack is empty */
  1433. pred = __pop_pred_stack(&stack);
  1434. if (WARN_ON(pred)) {
  1435. err = -EINVAL;
  1436. filter->root = NULL;
  1437. goto fail;
  1438. }
  1439. err = check_pred_tree(filter, root);
  1440. if (err)
  1441. goto fail;
  1442. /* Optimize the tree */
  1443. err = fold_pred_tree(filter, root);
  1444. if (err)
  1445. goto fail;
  1446. /* We don't set root until we know it works */
  1447. barrier();
  1448. filter->root = root;
  1449. }
  1450. err = 0;
  1451. fail:
  1452. __free_pred_stack(&stack);
  1453. return err;
  1454. }
  1455. static inline void event_set_filtered_flag(struct trace_event_file *file)
  1456. {
  1457. struct trace_event_call *call = file->event_call;
  1458. if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
  1459. call->flags |= TRACE_EVENT_FL_FILTERED;
  1460. else
  1461. file->flags |= EVENT_FILE_FL_FILTERED;
  1462. }
  1463. static inline void event_set_filter(struct trace_event_file *file,
  1464. struct event_filter *filter)
  1465. {
  1466. struct trace_event_call *call = file->event_call;
  1467. if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
  1468. rcu_assign_pointer(call->filter, filter);
  1469. else
  1470. rcu_assign_pointer(file->filter, filter);
  1471. }
  1472. static inline void event_clear_filter(struct trace_event_file *file)
  1473. {
  1474. struct trace_event_call *call = file->event_call;
  1475. if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
  1476. RCU_INIT_POINTER(call->filter, NULL);
  1477. else
  1478. RCU_INIT_POINTER(file->filter, NULL);
  1479. }
  1480. static inline void
  1481. event_set_no_set_filter_flag(struct trace_event_file *file)
  1482. {
  1483. struct trace_event_call *call = file->event_call;
  1484. if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
  1485. call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
  1486. else
  1487. file->flags |= EVENT_FILE_FL_NO_SET_FILTER;
  1488. }
  1489. static inline void
  1490. event_clear_no_set_filter_flag(struct trace_event_file *file)
  1491. {
  1492. struct trace_event_call *call = file->event_call;
  1493. if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
  1494. call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
  1495. else
  1496. file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER;
  1497. }
  1498. static inline bool
  1499. event_no_set_filter_flag(struct trace_event_file *file)
  1500. {
  1501. struct trace_event_call *call = file->event_call;
  1502. if (file->flags & EVENT_FILE_FL_NO_SET_FILTER)
  1503. return true;
  1504. if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) &&
  1505. (call->flags & TRACE_EVENT_FL_NO_SET_FILTER))
  1506. return true;
  1507. return false;
  1508. }
/*
 * Node used by replace_system_preds() to keep per-event filters on a
 * temporary list until they can all be committed or freed together.
 */
struct filter_list {
	struct list_head list;
	struct event_filter *filter;
};
/*
 * Apply @filter_string to every event of subsystem @dir in @tr.
 * Events whose dry-run parse fails are flagged and skipped; the rest
 * get a freshly built filter swapped in, and the displaced filters are
 * freed after a synchronize_sched() grace period.  Returns -EINVAL
 * only when no event accepted the filter, -ENOMEM on allocation
 * failure.
 */
static int replace_system_preds(struct trace_subsystem_dir *dir,
				struct trace_array *tr,
				struct filter_parse_state *ps,
				char *filter_string)
{
	struct trace_event_file *file;
	struct filter_list *filter_item;
	struct filter_list *tmp;
	LIST_HEAD(filter_list);
	bool fail = true;
	int err;

	/* Pass 1: dry-run each event to see which can take the filter. */
	list_for_each_entry(file, &tr->events, list) {
		if (file->system != dir)
			continue;

		/*
		 * Try to see if the filter can be applied
		 * (filter arg is ignored on dry_run)
		 */
		err = replace_preds(file->event_call, NULL, ps, true);
		if (err)
			event_set_no_set_filter_flag(file);
		else
			event_clear_no_set_filter_flag(file);
	}

	/* Pass 2: build and install a filter for each eligible event. */
	list_for_each_entry(file, &tr->events, list) {
		struct event_filter *filter;

		if (file->system != dir)
			continue;

		if (event_no_set_filter_flag(file))
			continue;

		/* Park the displaced filter on a list until we can sync. */
		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
		if (!filter_item)
			goto fail_mem;

		list_add_tail(&filter_item->list, &filter_list);

		filter_item->filter = __alloc_filter();
		if (!filter_item->filter)
			goto fail_mem;
		filter = filter_item->filter;

		/* Can only fail on no memory */
		err = replace_filter_string(filter, filter_string);
		if (err)
			goto fail_mem;

		err = replace_preds(file->event_call, filter, ps, false);
		if (err) {
			filter_disable(file);
			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
			append_filter_err(ps, filter);
		} else
			event_set_filtered_flag(file);
		/*
		 * Regardless of if this returned an error, we still
		 * replace the filter for the call.
		 */
		filter = event_filter(file);
		event_set_filter(file, filter_item->filter);
		filter_item->filter = filter;

		fail = false;
	}

	if (fail)
		goto fail;

	/*
	 * The calls can still be using the old filters.
	 * Do a synchronize_sched() to ensure all calls are
	 * done with them before we free them.
	 */
	synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return 0;
fail:
	/* No call succeeded */
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
	return -EINVAL;
fail_mem:
	/* If any call succeeded, we still need to sync */
	if (!fail)
		synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return -ENOMEM;
}
/*
 * Allocate a filter and a parse state, optionally record @filter_str
 * in the filter (@set_str), then run the infix->postfix parse.  On
 * return other than -ENOMEM, *psp and *filterp belong to the caller,
 * who must finish with create_filter_finish(); parse failures are
 * appended to the filter's error string when @set_str.
 */
static int create_filter_start(char *filter_str, bool set_str,
			       struct filter_parse_state **psp,
			       struct event_filter **filterp)
{
	struct event_filter *filter;
	struct filter_parse_state *ps = NULL;
	int err = 0;

	WARN_ON_ONCE(*psp || *filterp);

	/* allocate everything, and if any fails, free all and fail */
	filter = __alloc_filter();
	if (filter && set_str)
		err = replace_filter_string(filter, filter_str);

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);

	if (!filter || !ps || err) {
		kfree(ps);
		__free_filter(filter);
		return -ENOMEM;
	}

	/* we're committed to creating a new filter */
	*filterp = filter;
	*psp = ps;

	parse_init(ps, filter_ops, filter_str);
	err = filter_parse(ps);
	if (err && set_str)
		append_filter_err(ps, filter);
	return err;
}
  1631. static void create_filter_finish(struct filter_parse_state *ps)
  1632. {
  1633. if (ps) {
  1634. filter_opstack_clear(ps);
  1635. postfix_clear(ps);
  1636. kfree(ps);
  1637. }
  1638. }
  1639. /**
  1640. * create_filter - create a filter for a trace_event_call
  1641. * @call: trace_event_call to create a filter for
  1642. * @filter_str: filter string
  1643. * @set_str: remember @filter_str and enable detailed error in filter
  1644. * @filterp: out param for created filter (always updated on return)
  1645. *
  1646. * Creates a filter for @call with @filter_str. If @set_str is %true,
  1647. * @filter_str is copied and recorded in the new filter.
  1648. *
  1649. * On success, returns 0 and *@filterp points to the new filter. On
  1650. * failure, returns -errno and *@filterp may point to %NULL or to a new
  1651. * filter. In the latter case, the returned filter contains error
  1652. * information if @set_str is %true and the caller is responsible for
  1653. * freeing it.
  1654. */
static int create_filter(struct trace_event_call *call,
			 char *filter_str, bool set_str,
			 struct event_filter **filterp)
{
	struct event_filter *filter = NULL;
	struct filter_parse_state *ps = NULL;
	int err;

	err = create_filter_start(filter_str, set_str, &ps, &filter);
	if (!err) {
		/* Parse succeeded: build the predicate tree for @call. */
		err = replace_preds(call, filter, ps, false);
		if (err && set_str)
			append_filter_err(ps, filter);
	}
	/*
	 * Without @set_str there is no error string to hand back, so a
	 * failed filter is useless to the caller - free it here.
	 */
	if (err && !set_str) {
		free_event_filter(filter);
		filter = NULL;
	}
	create_filter_finish(ps);

	*filterp = filter;
	return err;
}
  1676. int create_event_filter(struct trace_event_call *call,
  1677. char *filter_str, bool set_str,
  1678. struct event_filter **filterp)
  1679. {
  1680. return create_filter(call, filter_str, set_str, filterp);
  1681. }
/**
 * create_system_filter - create a filter for an event_subsystem
 * @dir: descriptor connecting the subsystem to a trace array
 * @tr: the trace array whose events belong to the subsystem
 * @filter_str: filter string
 * @filterp: out param for created filter (always updated on return)
 *
 * Identical to create_filter() except that it creates a subsystem filter
 * and always remembers @filter_str.
 */
static int create_system_filter(struct trace_subsystem_dir *dir,
				struct trace_array *tr,
				char *filter_str, struct event_filter **filterp)
{
	struct event_filter *filter = NULL;
	struct filter_parse_state *ps = NULL;
	int err;

	err = create_filter_start(filter_str, true, &ps, &filter);
	if (!err) {
		err = replace_system_preds(dir, tr, ps, filter_str);
		if (!err) {
			/* System filters just show a default message */
			kfree(filter->filter_string);
			filter->filter_string = NULL;
		} else {
			append_filter_err(ps, filter);
		}
	}
	create_filter_finish(ps);

	*filterp = filter;
	return err;
}
/*
 * Set or clear the filter of a single event from its "filter" file.
 * The literal string "0" removes the current filter.
 *
 * caller must hold event_mutex
 */
int apply_event_filter(struct trace_event_file *file, char *filter_string)
{
	struct trace_event_call *call = file->event_call;
	struct event_filter *filter;
	int err;

	if (!strcmp(strstrip(filter_string), "0")) {
		filter_disable(file);
		filter = event_filter(file);

		if (!filter)
			return 0;

		event_clear_filter(file);

		/* Make sure the filter is not being used */
		synchronize_sched();
		__free_filter(filter);

		return 0;
	}

	err = create_filter(call, filter_string, true, &filter);

	/*
	 * Always swap the call filter with the new filter
	 * even if there was an error. If there was an error
	 * in the filter, we disable the filter and show the error
	 * string
	 */
	if (filter) {
		struct event_filter *tmp;

		tmp = event_filter(file);
		if (!err)
			event_set_filtered_flag(file);
		else
			filter_disable(file);

		event_set_filter(file, filter);

		if (tmp) {
			/* Make sure the call is done with the filter */
			synchronize_sched();
			__free_filter(tmp);
		}
	}

	return err;
}
/*
 * Apply @filter_string to every event in the subsystem @dir; "0" clears
 * all filters in the subsystem instead. Takes event_mutex itself.
 */
int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
				 char *filter_string)
{
	struct event_subsystem *system = dir->subsystem;
	struct trace_array *tr = dir->tr;
	struct event_filter *filter;
	int err = 0;

	mutex_lock(&event_mutex);

	/* Make sure the system still has events */
	if (!dir->nr_events) {
		err = -ENODEV;
		goto out_unlock;
	}

	/* "0" means remove the subsystem filter and all per-event filters */
	if (!strcmp(strstrip(filter_string), "0")) {
		filter_free_subsystem_preds(dir, tr);
		remove_filter_string(system->filter);
		filter = system->filter;
		system->filter = NULL;
		/* Ensure all filters are no longer used */
		synchronize_sched();
		filter_free_subsystem_filters(dir, tr);
		__free_filter(filter);
		goto out_unlock;
	}

	err = create_system_filter(dir, tr, filter_string, &filter);

	/* Install the new filter even on error so its error string shows */
	if (filter) {
		/*
		 * No event actually uses the system filter
		 * we can free it without synchronize_sched().
		 */
		__free_filter(system->filter);
		system->filter = filter;
	}
out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
  1790. #ifdef CONFIG_PERF_EVENTS
  1791. void ftrace_profile_free_filter(struct perf_event *event)
  1792. {
  1793. struct event_filter *filter = event->filter;
  1794. event->filter = NULL;
  1795. __free_filter(filter);
  1796. }
/*
 * State passed through the pred-tree walk when translating a perf 'ip'
 * filter into ftrace filter/notrace regexps. The first_* flags are 1
 * until the first pattern of that kind is applied, so only that first
 * pattern resets the existing hash; later ones append.
 */
struct function_filter_data {
	struct ftrace_ops *ops;
	int first_filter;
	int first_notrace;
};
  1802. #ifdef CONFIG_FUNCTION_TRACER
  1803. static char **
  1804. ftrace_function_filter_re(char *buf, int len, int *count)
  1805. {
  1806. char *str, **re;
  1807. str = kstrndup(buf, len, GFP_KERNEL);
  1808. if (!str)
  1809. return NULL;
  1810. /*
  1811. * The argv_split function takes white space
  1812. * as a separator, so convert ',' into spaces.
  1813. */
  1814. strreplace(str, ',', ' ');
  1815. re = argv_split(GFP_KERNEL, str, count);
  1816. kfree(str);
  1817. return re;
  1818. }
  1819. static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
  1820. int reset, char *re, int len)
  1821. {
  1822. int ret;
  1823. if (filter)
  1824. ret = ftrace_set_filter(ops, re, len, reset);
  1825. else
  1826. ret = ftrace_set_notrace(ops, re, len, reset);
  1827. return ret;
  1828. }
  1829. static int __ftrace_function_set_filter(int filter, char *buf, int len,
  1830. struct function_filter_data *data)
  1831. {
  1832. int i, re_cnt, ret = -EINVAL;
  1833. int *reset;
  1834. char **re;
  1835. reset = filter ? &data->first_filter : &data->first_notrace;
  1836. /*
  1837. * The 'ip' field could have multiple filters set, separated
  1838. * either by space or comma. We first cut the filter and apply
  1839. * all pieces separatelly.
  1840. */
  1841. re = ftrace_function_filter_re(buf, len, &re_cnt);
  1842. if (!re)
  1843. return -EINVAL;
  1844. for (i = 0; i < re_cnt; i++) {
  1845. ret = ftrace_function_set_regexp(data->ops, filter, *reset,
  1846. re[i], strlen(re[i]));
  1847. if (ret)
  1848. break;
  1849. if (*reset)
  1850. *reset = 0;
  1851. }
  1852. argv_free(re);
  1853. return ret;
  1854. }
  1855. static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
  1856. {
  1857. struct ftrace_event_field *field = pred->field;
  1858. if (leaf) {
  1859. /*
  1860. * Check the leaf predicate for function trace, verify:
  1861. * - only '==' and '!=' is used
  1862. * - the 'ip' field is used
  1863. */
  1864. if ((pred->op != OP_EQ) && (pred->op != OP_NE))
  1865. return -EINVAL;
  1866. if (strcmp(field->name, "ip"))
  1867. return -EINVAL;
  1868. } else {
  1869. /*
  1870. * Check the non leaf predicate for function trace, verify:
  1871. * - only '||' is used
  1872. */
  1873. if (pred->op != OP_OR)
  1874. return -EINVAL;
  1875. }
  1876. return 0;
  1877. }
  1878. static int ftrace_function_set_filter_cb(enum move_type move,
  1879. struct filter_pred *pred,
  1880. int *err, void *data)
  1881. {
  1882. /* Checking the node is valid for function trace. */
  1883. if ((move != MOVE_DOWN) ||
  1884. (pred->left != FILTER_PRED_INVALID)) {
  1885. *err = ftrace_function_check_pred(pred, 0);
  1886. } else {
  1887. *err = ftrace_function_check_pred(pred, 1);
  1888. if (*err)
  1889. return WALK_PRED_ABORT;
  1890. *err = __ftrace_function_set_filter(pred->op == OP_EQ,
  1891. pred->regex.pattern,
  1892. pred->regex.len,
  1893. data);
  1894. }
  1895. return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
  1896. }
  1897. static int ftrace_function_set_filter(struct perf_event *event,
  1898. struct event_filter *filter)
  1899. {
  1900. struct function_filter_data data = {
  1901. .first_filter = 1,
  1902. .first_notrace = 1,
  1903. .ops = &event->ftrace_ops,
  1904. };
  1905. return walk_pred_tree(filter->preds, filter->root,
  1906. ftrace_function_set_filter_cb, &data);
  1907. }
  1908. #else
/* Stub: without CONFIG_FUNCTION_TRACER, 'ip' filters are unsupported */
static int ftrace_function_set_filter(struct perf_event *event,
				      struct event_filter *filter)
{
	return -ENODEV;
}
  1914. #endif /* CONFIG_FUNCTION_TRACER */
/*
 * Attach a filter described by @filter_str to the perf @event.
 * Returns 0 on success, -EINVAL when the event has no trace event call,
 * -EEXIST when a filter is already attached, or a parse/apply error.
 */
int ftrace_profile_set_filter(struct perf_event *event, int event_id,
			      char *filter_str)
{
	int err;
	struct event_filter *filter;
	struct trace_event_call *call;

	mutex_lock(&event_mutex);

	call = event->tp_event;

	err = -EINVAL;
	if (!call)
		goto out_unlock;

	/* Only one filter may be attached to a perf event at a time */
	err = -EEXIST;
	if (event->filter)
		goto out_unlock;

	err = create_filter(call, filter_str, false, &filter);
	if (err)
		goto free_filter;

	if (ftrace_event_is_function(call))
		err = ftrace_function_set_filter(event, filter);
	else
		event->filter = filter;

free_filter:
	/*
	 * Function events consume the filter during set-up (it is turned
	 * into ftrace regexps above), so it is freed here in that case
	 * as well as on error; otherwise ownership moved to event->filter.
	 */
	if (err || ftrace_event_is_function(call))
		__free_filter(filter);

out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
  1943. #endif /* CONFIG_PERF_EVENTS */
  1944. #ifdef CONFIG_FTRACE_STARTUP_TEST
  1945. #include <linux/types.h>
  1946. #include <linux/tracepoint.h>
  1947. #define CREATE_TRACE_POINTS
  1948. #include "trace_events_filter_test.h"
/*
 * DATA_REC - build one test_filter_data entry: @m is the expected
 * match result for the record field values va..vh, and @nvisit names
 * the record fields whose predicates must NOT be visited while
 * matching (short-circuit check).
 */
#define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
{ \
	.filter = FILTER, \
	.rec    = { .a = va, .b = vb, .c = vc, .d = vd, \
		    .e = ve, .f = vf, .g = vg, .h = vh }, \
	.match  = m, \
	.not_visited = nvisit, \
}

#define YES 1
#define NO  0
/*
 * Self-test table: each entry pairs a filter expression with one input
 * record, the expected match result, and the fields whose predicates
 * short-circuiting must skip.
 */
static struct test_filter_data_t {
	char *filter;					/* filter expression under test */
	struct trace_event_raw_ftrace_test_filter rec;	/* input record */
	int match;					/* expected filter_match_preds() result */
	char *not_visited;				/* fields that must not be visited */
} test_filter_data[] = {
#define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
	       "e == 1 && f == 1 && g == 1 && h == 1"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
	DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
	DATA_REC(NO, 1, 1, 1, 1, 1, 1, 1, 0, ""),
#undef FILTER
#define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
	       "e == 1 || f == 1 || g == 1 || h == 1"
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
#undef FILTER
#define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
	       "(e == 1 || f == 1) && (g == 1 || h == 1)"
	DATA_REC(NO, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
	DATA_REC(NO, 1, 0, 1, 0, 0, 1, 0, 0, "bd"),
#undef FILTER
#define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
#undef FILTER
#define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
#undef FILTER
#define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
	       "(e == 1 || f == 1)) && (g == 1 || h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
#undef FILTER
#define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
	       "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
	DATA_REC(NO, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(NO, 1, 0, 1, 0, 1, 0, 1, 0, ""),
#undef FILTER
#define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
	       "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
};

#undef DATA_REC
#undef FILTER
#undef YES
#undef NO

/* Number of entries in test_filter_data */
#define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))

/* Set by test_pred_visited_fn() when a supposedly-skipped pred runs */
static int test_pred_visited;
  2020. static int test_pred_visited_fn(struct filter_pred *pred, void *event)
  2021. {
  2022. struct ftrace_event_field *field = pred->field;
  2023. test_pred_visited = 1;
  2024. printk(KERN_INFO "\npred visited %s\n", field->name);
  2025. return 1;
  2026. }
  2027. static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
  2028. int *err, void *data)
  2029. {
  2030. char *fields = data;
  2031. if ((move == MOVE_DOWN) &&
  2032. (pred->left == FILTER_PRED_INVALID)) {
  2033. struct ftrace_event_field *field = pred->field;
  2034. if (!field) {
  2035. WARN(1, "all leafs should have field defined");
  2036. return WALK_PRED_DEFAULT;
  2037. }
  2038. if (!strchr(fields, *field->name))
  2039. return WALK_PRED_DEFAULT;
  2040. WARN_ON(!pred->fn);
  2041. pred->fn = test_pred_visited_fn;
  2042. }
  2043. return WALK_PRED_DEFAULT;
  2044. }
/*
 * Boot-time self test: parse each filter in test_filter_data, run it
 * against the paired record, and verify both the match result and that
 * short-circuiting skipped the predicates listed in not_visited.
 */
static __init int ftrace_test_event_filter(void)
{
	int i;

	printk(KERN_INFO "Testing ftrace filter: ");

	for (i = 0; i < DATA_CNT; i++) {
		struct event_filter *filter = NULL;
		struct test_filter_data_t *d = &test_filter_data[i];
		int err;

		err = create_filter(&event_ftrace_test_filter, d->filter,
				    false, &filter);
		if (err) {
			printk(KERN_INFO
			       "Failed to get filter for '%s', err %d\n",
			       d->filter, err);
			__free_filter(filter);
			break;
		}

		/*
		 * The preemption disabling is not really needed for self
		 * tests, but the rcu dereference will complain without it.
		 */
		preempt_disable();
		/* Trap any visit to the predicates that must be skipped */
		if (*d->not_visited)
			walk_pred_tree(filter->preds, filter->root,
				       test_walk_pred_cb,
				       d->not_visited);

		test_pred_visited = 0;
		err = filter_match_preds(filter, &d->rec);
		preempt_enable();

		__free_filter(filter);

		if (test_pred_visited) {
			printk(KERN_INFO
			       "Failed, unwanted pred visited for filter %s\n",
			       d->filter);
			break;
		}

		if (err != d->match) {
			printk(KERN_INFO
			       "Failed to match filter '%s', expected %d\n",
			       d->filter, d->match);
			break;
		}
	}

	/* The loop only completes if every entry passed */
	if (i == DATA_CNT)
		printk(KERN_CONT "OK\n");

	return 0;
}

late_initcall(ftrace_test_event_filter);
  2093. #endif /* CONFIG_FTRACE_STARTUP_TEST */