map.c

#include "symbol.h"
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include "map.h"
#include "thread.h"
#include "strlist.h"
#include "vdso.h"
#include "build-id.h"
#include "util.h"
#include "debug.h"
#include "machine.h"
#include <linux/string.h>

static void __maps__insert(struct maps *maps, struct map *map);

const char *map_type__name[MAP__NR_TYPES] = {
	[MAP__FUNCTION] = "Functions",
	[MAP__VARIABLE] = "Variables",
};

static inline int is_anon_memory(const char *filename)
{
	return !strcmp(filename, "//anon") ||
	       !strcmp(filename, "/dev/zero (deleted)") ||
	       !strcmp(filename, "/anon_hugepage (deleted)");
}

static inline int is_no_dso_memory(const char *filename)
{
	return !strncmp(filename, "[stack", 6) ||
	       !strncmp(filename, "/SYSV", 5) ||
	       !strcmp(filename, "[heap]");
}

static inline int is_android_lib(const char *filename)
{
	return !strncmp(filename, "/data/app-lib", 13) ||
	       !strncmp(filename, "/system/lib", 11);
}

static inline bool replace_android_lib(const char *filename, char *newfilename)
{
	const char *libname;
	char *app_abi;
	size_t app_abi_length, new_length;
	size_t lib_length = 0;

	libname = strrchr(filename, '/');
	if (libname)
		lib_length = strlen(libname);

	app_abi = getenv("APP_ABI");
	if (!app_abi)
		return false;

	app_abi_length = strlen(app_abi);

	if (!strncmp(filename, "/data/app-lib", 13)) {
		char *apk_path;

		if (!app_abi_length)
			return false;

		new_length = 7 + app_abi_length + lib_length;

		apk_path = getenv("APK_PATH");
		if (apk_path) {
			new_length += strlen(apk_path) + 1;
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "%s/libs/%s/%s", apk_path, app_abi, libname);
		} else {
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "libs/%s/%s", app_abi, libname);
		}
		return true;
	}

	if (!strncmp(filename, "/system/lib/", 11)) {
		char *ndk, *app;
		const char *arch;
		size_t ndk_length;
		size_t app_length;

		ndk = getenv("NDK_ROOT");
		app = getenv("APP_PLATFORM");

		if (!(ndk && app))
			return false;

		ndk_length = strlen(ndk);
		app_length = strlen(app);

		if (!(ndk_length && app_length && app_abi_length))
			return false;

		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
		       !strncmp(app_abi, "mips", 4) ? "mips" :
		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;

		if (!arch)
			return false;

		new_length = 27 + ndk_length + app_length +
			     lib_length + strlen(arch);

		if (new_length > PATH_MAX)
			return false;
		snprintf(newfilename, new_length,
			 "%s/platforms/%s/arch-%s/usr/lib/%s",
			 ndk, app, arch, libname);

		return true;
	}
	return false;
}

void map__init(struct map *map, enum map_type type,
	       u64 start, u64 end, u64 pgoff, struct dso *dso)
{
	map->type = type;
	map->start = start;
	map->end = end;
	map->pgoff = pgoff;
	map->reloc = 0;
	map->dso = dso__get(dso);
	map->map_ip = map__map_ip;
	map->unmap_ip = map__unmap_ip;
	RB_CLEAR_NODE(&map->rb_node);
	map->groups = NULL;
	map->erange_warned = false;
	atomic_set(&map->refcnt, 1);
}

struct map *map__new(struct machine *machine, u64 start, u64 len,
		     u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino,
		     u64 ino_gen, u32 prot, u32 flags, char *filename,
		     enum map_type type, struct thread *thread)
{
	struct map *map = malloc(sizeof(*map));

	if (map != NULL) {
		char newfilename[PATH_MAX];
		struct dso *dso;
		int anon, no_dso, vdso, android;

		android = is_android_lib(filename);
		anon = is_anon_memory(filename);
		vdso = is_vdso_map(filename);
		no_dso = is_no_dso_memory(filename);

		map->maj = d_maj;
		map->min = d_min;
		map->ino = ino;
		map->ino_generation = ino_gen;
		map->prot = prot;
		map->flags = flags;

		if ((anon || no_dso) && type == MAP__FUNCTION) {
			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
			filename = newfilename;
		}

		if (android) {
			if (replace_android_lib(filename, newfilename))
				filename = newfilename;
		}

		if (vdso) {
			pgoff = 0;
			dso = machine__findnew_vdso(machine, thread);
		} else
			dso = machine__findnew_dso(machine, filename);

		if (dso == NULL)
			goto out_delete;

		map__init(map, type, start, start + len, pgoff, dso);

		if (anon || no_dso) {
			map->map_ip = map->unmap_ip = identity__map_ip;

			/*
			 * Set memory without DSO as loaded. All map__find_*
			 * functions still return NULL, and we avoid the
			 * unnecessary map__load warning.
			 */
			if (type != MAP__FUNCTION)
				dso__set_loaded(dso, map->type);
		}
		dso__put(dso);
	}
	return map;
out_delete:
	free(map);
	return NULL;
}

/*
 * Constructor variant for modules (where we know from /proc/modules where
 * they are loaded) and for vmlinux, where only after we load all the
 * symbols we'll know where it starts and ends.
 */
struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
{
	struct map *map = calloc(1, (sizeof(*map) +
				     (dso->kernel ? sizeof(struct kmap) : 0)));
	if (map != NULL) {
		/*
		 * ->end will be filled after we load all the symbols
		 */
		map__init(map, type, start, 0, 0, dso);
	}

	return map;
}
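
/*
 * A minimal usage sketch (illustrative, not part of the original perf code):
 * it assumes a "dso" obtained elsewhere, e.g. from machine__findnew_dso(),
 * and shows the sequence the constructor above is meant for: create the map
 * with only its start known, load the symbols, then derive start/end from
 * them. The helper name and the NULL symbol filter are assumptions.
 */
static inline struct map *map__new2_and_load_sketch(u64 start, struct dso *dso)
{
	struct map *map = map__new2(start, dso, MAP__FUNCTION);

	if (map == NULL)
		return NULL;

	if (map__load(map, NULL) < 0) {	/* NULL: no symbol filter */
		map__put(map);
		return NULL;
	}

	map__fixup_start(map);	/* ->start comes from the first symbol */
	map__fixup_end(map);	/* ->end comes from the last symbol */
	return map;
}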

/*
 * Use this and __map__is_kmodule() for map instances that are in
 * machine->kmaps, and thus have map->groups->machine all properly set, to
 * disambiguate between the kernel and modules.
 *
 * When the need arises, introduce map__is_{kernel,kmodule}() that
 * checks (map->groups != NULL && map->groups->machine != NULL &&
 * map->dso->kernel) before calling __map__is_{kernel,kmodule}().
 */
bool __map__is_kernel(const struct map *map)
{
	return __machine__kernel_map(map->groups->machine, map->type) == map;
}
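
/*
 * A minimal sketch of the checked map__is_kernel() wrapper anticipated by
 * the comment above: validate that the map is really hooked into a machine
 * before using __map__is_kernel(). The helper name and its presence here are
 * assumptions for illustration, not an interface this file exports.
 */
static inline bool map__is_kernel_sketch(const struct map *map)
{
	return map->groups != NULL && map->groups->machine != NULL &&
	       map->dso->kernel && __map__is_kernel(map);
}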

static void map__exit(struct map *map)
{
	BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
	dso__zput(map->dso);
}

void map__delete(struct map *map)
{
	map__exit(map);
	free(map);
}

void map__put(struct map *map)
{
	if (map && atomic_dec_and_test(&map->refcnt))
		map__delete(map);
}

void map__fixup_start(struct map *map)
{
	struct rb_root *symbols = &map->dso->symbols[map->type];
	struct rb_node *nd = rb_first(symbols);

	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->start = sym->start;
	}
}

void map__fixup_end(struct map *map)
{
	struct rb_root *symbols = &map->dso->symbols[map->type];
	struct rb_node *nd = rb_last(symbols);

	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->end = sym->end;
	}
}

#define DSO__DELETED "(deleted)"

int map__load(struct map *map, symbol_filter_t filter)
{
	const char *name = map->dso->long_name;
	int nr;

	if (dso__loaded(map->dso, map->type))
		return 0;

	nr = dso__load(map->dso, map, filter);
	if (nr < 0) {
		if (map->dso->has_build_id) {
			char sbuild_id[BUILD_ID_SIZE * 2 + 1];

			build_id__sprintf(map->dso->build_id,
					  sizeof(map->dso->build_id),
					  sbuild_id);
			pr_warning("%s with build id %s not found",
				   name, sbuild_id);
		} else
			pr_warning("Failed to open %s", name);

		pr_warning(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
#ifdef HAVE_LIBELF_SUPPORT
		const size_t len = strlen(name);
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_warning("%.*s was updated (is prelink enabled?). "
				   "Restart the long running apps that use it!\n",
				   (int)real_len, name);
		} else {
			pr_warning("no symbols found in %s, maybe install "
				   "a debug package?\n", name);
		}
#endif
		return -1;
	}

	return 0;
}

int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
	return strcmp(namea, nameb);
}

struct symbol *map__find_symbol(struct map *map, u64 addr,
				symbol_filter_t filter)
{
	if (map__load(map, filter) < 0)
		return NULL;

	return dso__find_symbol(map->dso, map->type, addr);
}

struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
					symbol_filter_t filter)
{
	if (map__load(map, filter) < 0)
		return NULL;

	if (!dso__sorted_by_name(map->dso, map->type))
		dso__sort_by_name(map->dso, map->type);

	return dso__find_symbol_by_name(map->dso, map->type, name);
}

struct map *map__clone(struct map *from)
{
	struct map *map = memdup(from, sizeof(*map));

	if (map != NULL) {
		atomic_set(&map->refcnt, 1);
		RB_CLEAR_NODE(&map->rb_node);
		dso__get(map->dso);
		map->groups = NULL;
	}

	return map;
}

int map__overlap(struct map *l, struct map *r)
{
	if (l->start > r->start) {
		struct map *t = l;
		l = r;
		r = t;
	}

	if (l->end > r->start)
		return 1;

	return 0;
}

size_t map__fprintf(struct map *map, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
		       map->start, map->end, map->pgoff, map->dso->name);
}

size_t map__fprintf_dsoname(struct map *map, FILE *fp)
{
	const char *dsoname = "[unknown]";

	if (map && map->dso && (map->dso->name || map->dso->long_name)) {
		if (symbol_conf.show_kernel_path && map->dso->long_name)
			dsoname = map->dso->long_name;
		else if (map->dso->name)
			dsoname = map->dso->name;
	}

	return fprintf(fp, "%s", dsoname);
}

int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
			 FILE *fp)
{
	char *srcline;
	int ret = 0;

	if (map && map->dso) {
		srcline = get_srcline(map->dso,
				      map__rip_2objdump(map, addr), NULL, true);
		if (srcline != SRCLINE_UNKNOWN)
			ret = fprintf(fp, "%s%s", prefix, srcline);
		free_srcline(srcline);
	}
	return ret;
}

/**
 * map__rip_2objdump - convert symbol start address to objdump address.
 * @map: memory map
 * @rip: symbol start address
 *
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
 * relative to section start.
 *
 * Return: Address suitable for passing to "objdump --start-address="
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
	if (!map->dso->adjust_symbols)
		return rip;

	if (map->dso->rel)
		return rip - map->pgoff;

	return map->unmap_ip(map, rip) - map->reloc;
}
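
/*
 * Illustrative sketch of how the value returned above is typically consumed:
 * format it into an "objdump --start-address=" command line, as the
 * kernel-doc comment suggests. The helper name and the exact command-line
 * layout are assumptions, not code from the original file.
 */
static inline int objdump_cmd_sketch(char *buf, size_t size, struct map *map,
				     u64 start, u64 end)
{
	return snprintf(buf, size,
			"objdump --start-address=0x%" PRIx64
			" --stop-address=0x%" PRIx64 " -d %s",
			map__rip_2objdump(map, start),
			map__rip_2objdump(map, end),
			map->dso->long_name);
}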

/**
 * map__objdump_2mem - convert objdump address to a memory address.
 * @map: memory map
 * @ip: objdump address
 *
 * Closely related to map__rip_2objdump(), this function takes an address from
 * objdump and converts it to a memory address. Note this assumes that @map
 * contains the address. To be sure the result is valid, check it forwards
 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
 *
 * Return: Memory address.
 */
u64 map__objdump_2mem(struct map *map, u64 ip)
{
	if (!map->dso->adjust_symbols)
		return map->unmap_ip(map, ip);

	if (map->dso->rel)
		return map->unmap_ip(map, ip + map->pgoff);

	return ip + map->reloc;
}
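
/*
 * A small sketch of the forward check suggested in the comment above:
 * converting an objdump address to a memory address and back should be the
 * identity when @map really contains the address. The helper name is an
 * assumption added for illustration.
 */
static inline bool map__objdump_roundtrip_ok(struct map *map, u64 ip)
{
	u64 mem = map__objdump_2mem(map, ip);

	return map__rip_2objdump(map, map->map_ip(map, mem)) == ip;
}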

static void maps__init(struct maps *maps)
{
	maps->entries = RB_ROOT;
	pthread_rwlock_init(&maps->lock, NULL);
}

void map_groups__init(struct map_groups *mg, struct machine *machine)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		maps__init(&mg->maps[i]);
	}
	mg->machine = machine;
	atomic_set(&mg->refcnt, 1);
}

static void __maps__purge(struct maps *maps)
{
	struct rb_root *root = &maps->entries;
	struct rb_node *next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_init(&pos->rb_node, root);
		map__put(pos);
	}
}

static void maps__exit(struct maps *maps)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__purge(maps);
	pthread_rwlock_unlock(&maps->lock);
}

void map_groups__exit(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i)
		maps__exit(&mg->maps[i]);
}

bool map_groups__empty(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		if (maps__first(&mg->maps[i]))
			return false;
	}

	return true;
}

struct map_groups *map_groups__new(struct machine *machine)
{
	struct map_groups *mg = malloc(sizeof(*mg));

	if (mg != NULL)
		map_groups__init(mg, machine);

	return mg;
}

void map_groups__delete(struct map_groups *mg)
{
	map_groups__exit(mg);
	free(mg);
}

void map_groups__put(struct map_groups *mg)
{
	if (mg && atomic_dec_and_test(&mg->refcnt))
		map_groups__delete(mg);
}

struct symbol *map_groups__find_symbol(struct map_groups *mg,
				       enum map_type type, u64 addr,
				       struct map **mapp,
				       symbol_filter_t filter)
{
	struct map *map = map_groups__find(mg, type, addr);

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map, filter) >= 0) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map->map_ip(map, addr), filter);
	}

	return NULL;
}

struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
					 struct map **mapp, symbol_filter_t filter)
{
	struct symbol *sym;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		sym = map__find_symbol_by_name(pos, name, filter);

		if (sym == NULL)
			continue;
		if (mapp != NULL)
			*mapp = pos;
		goto out;
	}

	sym = NULL;
out:
	pthread_rwlock_unlock(&maps->lock);
	return sym;
}

struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
					       enum map_type type,
					       const char *name,
					       struct map **mapp,
					       symbol_filter_t filter)
{
	struct symbol *sym = maps__find_symbol_by_name(&mg->maps[type], name, mapp, filter);

	return sym;
}

int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
{
	if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
		if (ams->map->groups == NULL)
			return -1;
		ams->map = map_groups__find(ams->map->groups, ams->map->type,
					    ams->addr);
		if (ams->map == NULL)
			return -1;
	}

	ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
	ams->sym = map__find_symbol(ams->map, ams->al_addr, filter);

	return ams->sym ? 0 : -1;
}

static size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	size_t printed = 0;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 2) {
			printed += dso__fprintf(pos->dso, pos->type, fp);
			printed += fprintf(fp, "--\n");
		}
	}

	pthread_rwlock_unlock(&maps->lock);

	return printed;
}

size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
				  FILE *fp)
{
	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
	return printed += maps__fprintf(&mg->maps[type], fp);
}

size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
{
	size_t printed = 0, i;

	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_maps(mg, i, fp);

	return printed;
}

static void __map_groups__insert(struct map_groups *mg, struct map *map)
{
	__maps__insert(&mg->maps[map->type], map);
	map->groups = mg;
}

static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
	struct rb_root *root;
	struct rb_node *next;
	int err = 0;

	pthread_rwlock_wrlock(&maps->lock);

	root = &maps->entries;
	next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		next = rb_next(&pos->rb_node);

		if (!map__overlap(pos, map))
			continue;

		if (verbose >= 2) {
			fputs("overlapping maps:\n", fp);
			map__fprintf(map, fp);
			map__fprintf(pos, fp);
		}

		rb_erase_init(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			struct map *before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			before->end = map->start;
			__map_groups__insert(pos->groups, before);
			if (verbose >= 2)
				map__fprintf(before, fp);
		}

		if (map->end < pos->end) {
			struct map *after = map__clone(pos);

			if (after == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			after->start = map->end;
			__map_groups__insert(pos->groups, after);
			if (verbose >= 2)
				map__fprintf(after, fp);
		}
put_map:
		map__put(pos);

		if (err)
			goto out;
	}

	err = 0;
out:
	pthread_rwlock_unlock(&maps->lock);
	return err;
}

int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
				   FILE *fp)
{
	return maps__fixup_overlappings(&mg->maps[map->type], map, fp);
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct map_groups *mg,
		      struct map_groups *parent, enum map_type type)
{
	int err = -ENOMEM;
	struct map *map;
	struct maps *maps = &parent->maps[type];

	pthread_rwlock_rdlock(&maps->lock);

	for (map = maps__first(maps); map; map = map__next(map)) {
		struct map *new = map__clone(map);

		if (new == NULL)
			goto out_unlock;

		map_groups__insert(mg, new);
	}

	err = 0;
out_unlock:
	pthread_rwlock_unlock(&maps->lock);
	return err;
}

static void __maps__insert(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps->entries.rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, &maps->entries);
	map__get(map);
}

void maps__insert(struct maps *maps, struct map *map)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__insert(maps, map);
	pthread_rwlock_unlock(&maps->lock);
}

static void __maps__remove(struct maps *maps, struct map *map)
{
	rb_erase_init(&map->rb_node, &maps->entries);
	map__put(map);
}

void maps__remove(struct maps *maps, struct map *map)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__remove(maps, map);
	pthread_rwlock_unlock(&maps->lock);
}

struct map *maps__find(struct maps *maps, u64 ip)
{
	struct rb_node **p, *parent = NULL;
	struct map *m;

	pthread_rwlock_rdlock(&maps->lock);

	p = &maps->entries.rb_node;
	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else if (ip >= m->end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	m = NULL;
out:
	pthread_rwlock_unlock(&maps->lock);
	return m;
}

struct map *maps__first(struct maps *maps)
{
	struct rb_node *first = rb_first(&maps->entries);

	if (first)
		return rb_entry(first, struct map, rb_node);
	return NULL;
}

struct map *map__next(struct map *map)
{
	struct rb_node *next = rb_next(&map->rb_node);

	if (next)
		return rb_entry(next, struct map, rb_node);
	return NULL;
}

struct kmap *map__kmap(struct map *map)
{
	if (!map->dso || !map->dso->kernel) {
		pr_err("Internal error: map__kmap with a non-kernel map\n");
		return NULL;
	}
	return (struct kmap *)(map + 1);
}

struct map_groups *map__kmaps(struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap || !kmap->kmaps) {
		pr_err("Internal error: map__kmaps with a non-kernel map\n");
		return NULL;
	}
	return kmap->kmaps;
}