dso.c

#include <asm/bug.h>
#include <sys/time.h>
#include <sys/resource.h>
#include "symbol.h"
#include "dso.h"
#include "machine.h"
#include "auxtrace.h"
#include "util.h"
#include "debug.h"

char dso__symtab_origin(const struct dso *dso)
{
	static const char origin[] = {
		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
	};

	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
		return '!';
	return origin[dso->symtab_type];
}

int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[BUILD_ID_SIZE * 2 + 1];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK: {
		char *debuglink;

		len = __symbol__join_symfs(filename, size, dso->long_name);
		debuglink = filename + len;
		while (debuglink != filename && *debuglink != '/')
			debuglink--;
		if (*debuglink == '/')
			debuglink++;
		ret = filename__read_debuglink(filename, debuglink,
					       size - (debuglink - filename));
		}
		break;
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		/* skip the locally configured cache if a symfs is given */
		if (symbol_conf.symfs[0] ||
		    (dso__build_id_filename(dso, filename, size) == NULL))
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		const char *last_slash;
		size_t dir_size;

		last_slash = dso->long_name + dso->long_name_len;
		while (last_slash != dso->long_name && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		dir_size = last_slash - dso->long_name + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
		len += scnprintf(filename + len, size - len, ".debug%s",
				 last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso->has_build_id) {
			ret = -1;
			break;
		}

		build_id__sprintf(dso->build_id,
				  sizeof(dso->build_id),
				  build_id_hex);
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso->long_name);
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		snprintf(filename, size, "%s", dso->long_name);
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__NOT_FOUND:
		ret = -1;
		break;
	}

	return ret;
}

static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
} compressions[] = {
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file },
#endif
	{ NULL, NULL },
};

bool is_supported_compression(const char *ext)
{
	unsigned i;

	for (i = 0; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return true;
	}
	return false;
}

bool is_kernel_module(const char *pathname, int cpumode)
{
	struct kmod_path m;
	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

	WARN_ONCE(mode != cpumode,
		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
		  cpumode);

	switch (mode) {
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_HYPERVISOR:
	case PERF_RECORD_MISC_GUEST_USER:
		return false;
	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
	default:
		if (kmod_path__parse(&m, pathname)) {
			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
			       pathname);
			return true;
		}
	}

	return m.kmod;
}

bool decompress_to_file(const char *ext, const char *filename, int output_fd)
{
	unsigned i;

	for (i = 0; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return !compressions[i].decompress(filename,
							   output_fd);
	}
	return false;
}

bool dso__needs_decompress(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}
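
/*
 * Illustrative caller sketch (not part of the original file): a consumer
 * that hits a compressed kernel module typically decompresses it into a
 * temporary file before reading symbols from it. The temp path and the
 * 'm'/'name' variables below are assumptions for the example only:
 *
 *	if (dso__needs_decompress(dso)) {
 *		char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
 *		int fd = mkstemp(tmpbuf);
 *
 *		if (fd >= 0 && decompress_to_file(m.ext, name, fd))
 *			name = tmpbuf;	// read from the decompressed copy
 *		if (fd >= 0)
 *			close(fd);
 *	}
 */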

/*
 * Parses the kernel module specified in @path and updates
 * the @m argument as follows:
 *
 *    @comp - true if @path contains a supported compression suffix,
 *            false otherwise
 *    @kmod - true if @path contains a '.ko' suffix in the right position,
 *            false otherwise
 *    @name - if (@alloc_name && @kmod) is true, it contains the strdup-ed
 *            base name of the kernel module without suffixes, otherwise
 *            the strdup-ed base name of @path
 *    @ext  - if (@alloc_ext && @comp) is true, it contains the strdup-ed
 *            compression suffix
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
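
/*
 * Illustrative example (an assumption, not from the original source): with
 * zlib support built in, parsing
 * "/lib/modules/4.2.0/kernel/net/ipv4/tcp_cubic.ko.gz" with alloc_name and
 * alloc_ext set would be expected to yield m->kmod = true, m->comp = true,
 * m->name = "[tcp_cubic]" and m->ext = "gz"; any dashes in the base name
 * would be turned into underscores by strxfrchar().
 */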

int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name, bool alloc_ext)
{
	const char *name = strrchr(path, '/');
	const char *ext  = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for a module name. For example:
	 * [aaa.bbb] is a valid module name. '[' should have higher
	 * priority than the '.ko' suffix.
	 *
	 * The kernel names are from machine__mmap_name. Such a name
	 * belongs to the kernel itself, not to a kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vdso32]", 8) == 0) ||
		    (strncmp(name, "[vdsox32]", 9) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;
		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	if (is_supported_compression(ext + 1)) {
		m->comp = true;
		ext -= 3;
	}

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		strxfrchar(m->name, '-', '_');
	}

	if (alloc_ext && m->comp) {
		m->ext = strdup(ext + 4);
		if (!m->ext) {
			free((void *) m->name);
			return -ENOMEM;
		}
	}

	return 0;
}

/*
 * Global list of open DSOs and the counter.
 */
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;

static void dso__list_add(struct dso *dso)
{
	list_add_tail(&dso->data.open_entry, &dso__data_open);
	dso__data_open_cnt++;
}

static void dso__list_del(struct dso *dso)
{
	list_del(&dso->data.open_entry);
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}

static void close_first_dso(void);

static int do_open(char *name)
{
	int fd;
	char sbuf[STRERR_BUFSIZE];

	do {
		fd = open(name, O_RDONLY);
		if (fd >= 0)
			return fd;

		pr_debug("dso open failed: %s\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		if (!dso__data_open_cnt || errno != EMFILE)
			break;

		close_first_dso();
	} while (1);

	return -1;
}

static int __open_dso(struct dso *dso, struct machine *machine)
{
	int fd;
	char *root_dir = (char *)"";
	char *name = malloc(PATH_MAX);

	if (!name)
		return -ENOMEM;

	if (machine)
		root_dir = machine->root_dir;

	if (dso__read_binary_type_filename(dso, dso->binary_type,
					   root_dir, name, PATH_MAX)) {
		free(name);
		return -EINVAL;
	}

	fd = do_open(name);
	free(name);
	return fd;
}

static void check_data_close(void);

/**
 * open_dso - Open DSO data file
 * @dso: dso object
 * @machine: machine object
 *
 * Open @dso's data file descriptor and update the
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
	int fd = __open_dso(dso, machine);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}

static void close_data_fd(struct dso *dso)
{
	if (dso->data.fd >= 0) {
		close(dso->data.fd);
		dso->data.fd = -1;
		dso->data.file_size = 0;
		dso__list_del(dso);
	}
}

/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Close @dso's data file descriptor and update the
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
{
	close_data_fd(dso);
}

static void close_first_dso(void)
{
	struct dso *dso;

	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
	close_dso(dso);
}

static rlim_t get_fd_limit(void)
{
	struct rlimit l;
	rlim_t limit = 0;

	/* Allow half of the current open fd limit. */
	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
		if (l.rlim_cur == RLIM_INFINITY)
			limit = l.rlim_cur;
		else
			limit = l.rlim_cur / 2;
	} else {
		pr_err("failed to get fd limit\n");
		limit = 1;
	}

	return limit;
}

static bool may_cache_fd(void)
{
	static rlim_t limit;

	if (!limit)
		limit = get_fd_limit();

	if (limit == RLIM_INFINITY)
		return true;

	return limit > (rlim_t) dso__data_open_cnt;
}

/*
 * Check and close the LRU dso if we crossed the allowed limit
 * for opened dso file descriptors. The limit is half of the
 * RLIMIT_NOFILE limit.
 */
static void check_data_close(void)
{
	bool cache_fd = may_cache_fd();

	if (!cache_fd)
		close_first_dso();
}

/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 */
void dso__data_close(struct dso *dso)
{
	pthread_mutex_lock(&dso__data_open_lock);
	close_dso(dso);
	pthread_mutex_unlock(&dso__data_open_lock);
}

static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;

	if (dso->data.fd >= 0)
		return;

	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
		dso->data.fd = open_dso(dso, machine);
		goto out;
	}

	do {
		dso->binary_type = binary_type_data[i++];

		dso->data.fd = open_dso(dso, machine);
		if (dso->data.fd >= 0)
			goto out;

	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso->data.fd >= 0)
		dso->data.status = DSO_DATA_STATUS_OK;
	else
		dso->data.status = DSO_DATA_STATUS_ERROR;
}

/**
 * dso__data_get_fd - Get dso's data file descriptor
 * @dso: dso object
 * @machine: machine object
 *
 * External interface to find the dso's file, open it and
 * return its file descriptor. It should be paired with
 * dso__data_put_fd() if it returns a non-negative value.
 */
int dso__data_get_fd(struct dso *dso, struct machine *machine)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
		return -1;

	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0)
		pthread_mutex_unlock(&dso__data_open_lock);

	return dso->data.fd;
}

void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}
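
/*
 * Illustrative usage sketch (not part of the original file); the file's
 * own dso__type() below follows the same get/put pairing:
 *
 *	int fd = dso__data_get_fd(dso, machine);
 *
 *	if (fd >= 0) {
 *		... use fd, e.g. pass it to dso__type_fd() ...
 *		dso__data_put_fd(dso);
 *	}
 */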

bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
	u32 flag = 1 << by;

	if (dso->data.status_seen & flag)
		return true;

	dso->data.status_seen |= flag;

	return false;
}

static void
dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node *next = rb_first(root);

	pthread_mutex_lock(&dso->lock);
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	pthread_mutex_unlock(&dso->lock);
}

static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
{
	const struct rb_root *root = &dso->data.cache;
	struct rb_node * const *p = &root->rb_node;
	const struct rb_node *parent = NULL;
	struct dso_cache *cache;

	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			return cache;
	}

	return NULL;
}

static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	pthread_mutex_lock(&dso->lock);
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	cache = NULL;
out:
	pthread_mutex_unlock(&dso->lock);
	return cache;
}

static ssize_t
dso_cache__memcpy(struct dso_cache *cache, u64 offset,
		  u8 *data, u64 size)
{
	u64 cache_offset = offset - cache->offset;
	u64 cache_size   = min(cache->size - cache_offset, size);

	memcpy(data, cache->data + cache_offset, cache_size);
	return cache_size;
}

static ssize_t
dso_cache__read(struct dso *dso, struct machine *machine,
		u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;
	struct dso_cache *old;
	ssize_t ret;

	do {
		u64 cache_offset;

		cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
		if (!cache)
			return -ENOMEM;

		pthread_mutex_lock(&dso__data_open_lock);

		/*
		 * dso->data.fd might be closed if another thread opened
		 * another file (dso) due to the open file limit
		 * (RLIMIT_NOFILE).
		 */
		try_to_open_dso(dso, machine);

		if (dso->data.fd < 0) {
			ret = -errno;
			dso->data.status = DSO_DATA_STATUS_ERROR;
			break;
		}

		cache_offset = offset & DSO__DATA_CACHE_MASK;

		ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
		if (ret <= 0)
			break;

		cache->offset = cache_offset;
		cache->size   = ret;
	} while (0);

	pthread_mutex_unlock(&dso__data_open_lock);

	if (ret > 0) {
		old = dso_cache__insert(dso, cache);
		if (old) {
			/* we lose the race */
			free(cache);
			cache = old;
		}

		ret = dso_cache__memcpy(cache, offset, data, size);
	}

	if (ret <= 0)
		free(cache);

	return ret;
}

static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;

	cache = dso_cache__find(dso, offset);
	if (cache)
		return dso_cache__memcpy(cache, offset, data, size);
	else
		return dso_cache__read(dso, machine, offset, data, size);
}

/*
 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
 * kept in the rb_tree. Any read of already cached data is served
 * from the cache.
 */
static ssize_t cached_read(struct dso *dso, struct machine *machine,
			   u64 offset, u8 *data, ssize_t size)
{
	ssize_t r = 0;
	u8 *p = data;

	do {
		ssize_t ret;

		ret = dso_cache_read(dso, machine, offset, p, size);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have. */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r      += ret;
		p      += ret;
		offset += ret;
		size   -= ret;

	} while (size);

	return r;
}

static int data_file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	if (dso->data.file_size)
		return 0;

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if another thread opened another
	 * file (dso) due to the open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		ret = -errno;
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso->data.fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       strerror_r(errno, sbuf, sizeof(sbuf)));
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso->data.file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}

/**
 * dso__data_size - Return dso data size
 * @dso: dso object
 * @machine: machine object
 *
 * Return: dso data size
 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
	if (data_file_size(dso, machine))
		return -1;

	/* For now just estimate dso data size is close to file size */
	return dso->data.file_size;
}

static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
				u64 offset, u8 *data, ssize_t size)
{
	if (data_file_size(dso, machine))
		return -1;

	/* Check the offset sanity. */
	if (offset > dso->data.file_size)
		return -1;

	if (offset + size < offset)
		return -1;

	return cached_read(dso, machine, offset, data, size);
}

/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso file offset. Opens the
 * dso data file and uses cached_read() to get the data.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_offset(dso, machine, offset, data, size);
}

/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: map object used to translate @addr to a file offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso virtual address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);

	return dso__data_read_offset(dso, machine, offset, data, size);
}
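
/*
 * Illustrative usage sketch (not part of the original file): reading a
 * few instruction bytes at a virtual address 'addr' already known to
 * fall inside 'map'; the variables are assumptions for the example:
 *
 *	u8 buf[16];
 *	ssize_t n = dso__data_read_addr(map->dso, map, machine,
 *					addr, buf, sizeof(buf));
 *
 *	if (n < (ssize_t) sizeof(buf))
 *		... handle a short read or an error ...
 */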

struct map *dso__new_map(const char *name)
{
	struct map *map = NULL;
	struct dso *dso = dso__new(name);

	if (dso)
		map = map__new2(0, dso, MAP__FUNCTION);

	return map;
}

struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		dso__set_short_name(dso, short_name, false);
		dso->kernel = dso_type;
	}

	return dso;
}

/*
 * Find a matching entry and/or link the current entry to the RB tree.
 * Either the dso or the name parameter must be non-NULL or the
 * function will not work.
 */
static struct dso *__dso__findlink_by_longname(struct rb_root *root,
					       struct dso *dso, const char *name)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node  *parent = NULL;

	if (!name)
		name = dso->long_name;
	/*
	 * Find node with the matching name
	 */
	while (*p) {
		struct dso *this = rb_entry(*p, struct dso, rb_node);
		int rc = strcmp(name, this->long_name);

		parent = *p;
		if (rc == 0) {
			/*
			 * In case the new DSO is a duplicate of an existing
			 * one, print a one-time warning & put the new entry
			 * at the end of the list of duplicates.
			 */
			if (!dso || (dso == this))
				return this;	/* Found a matching dso */
			/*
			 * The core kernel DSOs may have duplicated long names.
			 * In this case, the short names should be different.
			 * Compare the short names to differentiate the DSOs.
			 */
			rc = strcmp(dso->short_name, this->short_name);
			if (rc == 0) {
				pr_err("Duplicated dso name: %s\n", name);
				return NULL;
			}
		}
		if (rc < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	if (dso) {
		/* Add new node and rebalance tree */
		rb_link_node(&dso->rb_node, parent, p);
		rb_insert_color(&dso->rb_node, root);
		dso->root = root;
	}
	return NULL;
}

static inline struct dso *__dso__find_by_longname(struct rb_root *root,
						  const char *name)
{
	return __dso__findlink_by_longname(root, NULL, name);
}

void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	struct rb_root *root = dso->root;

	if (name == NULL)
		return;

	if (dso->long_name_allocated)
		free((char *)dso->long_name);

	if (root) {
		rb_erase(&dso->rb_node, root);
		/*
		 * __dso__findlink_by_longname() isn't guaranteed to add it
		 * back, so a clean removal is required here.
		 */
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
	}

	dso->long_name		 = name;
	dso->long_name_len	 = strlen(name);
	dso->long_name_allocated = name_allocated;

	if (root)
		__dso__findlink_by_longname(root, dso, NULL);
}

void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	if (name == NULL)
		return;

	if (dso->short_name_allocated)
		free((char *)dso->short_name);

	dso->short_name		  = name;
	dso->short_name_len	  = strlen(name);
	dso->short_name_allocated = name_allocated;
}

static void dso__set_basename(struct dso *dso)
{
	/*
	 * basename() may modify path buffer, so we must pass
	 * a copy.
	 */
	char *base, *lname = strdup(dso->long_name);

	if (!lname)
		return;

	/*
	 * basename() may return a pointer to internal
	 * storage which is reused in subsequent calls
	 * so copy the result.
	 */
	base = strdup(basename(lname));

	free(lname);

	if (!base)
		return;

	dso__set_short_name(dso, base, true);
}

int dso__name_len(const struct dso *dso)
{
	if (!dso)
		return strlen("[unknown]");
	if (verbose)
		return dso->long_name_len;

	return dso->short_name_len;
}

bool dso__loaded(const struct dso *dso, enum map_type type)
{
	return dso->loaded & (1 << type);
}

bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
{
	return dso->sorted_by_name & (1 << type);
}

void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
{
	dso->sorted_by_name |= (1 << type);
}

struct dso *dso__new(const char *name)
{
	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);

	if (dso != NULL) {
		int i;
		strcpy(dso->name, name);
		dso__set_long_name(dso, dso->name, false);
		dso__set_short_name(dso, dso->name, false);
		for (i = 0; i < MAP__NR_TYPES; ++i)
			dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
		dso->data.cache = RB_ROOT;
		dso->data.fd = -1;
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_build_id = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_TYPE_USER;
		dso->needs_swap = DSO_SWAP__UNSET;
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
		INIT_LIST_HEAD(&dso->node);
		INIT_LIST_HEAD(&dso->data.open_entry);
		pthread_mutex_init(&dso->lock, NULL);
		atomic_set(&dso->refcnt, 1);
	}

	return dso;
}

void dso__delete(struct dso *dso)
{
	int i;

	if (!RB_EMPTY_NODE(&dso->rb_node))
		pr_err("DSO %s is still in rbtree when being deleted!\n",
		       dso->long_name);
	for (i = 0; i < MAP__NR_TYPES; ++i)
		symbols__delete(&dso->symbols[i]);

	if (dso->short_name_allocated) {
		zfree((char **)&dso->short_name);
		dso->short_name_allocated = false;
	}

	if (dso->long_name_allocated) {
		zfree((char **)&dso->long_name);
		dso->long_name_allocated = false;
	}

	dso__data_close(dso);
	auxtrace_cache__free(dso->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	zfree(&dso->symsrc_filename);
	pthread_mutex_destroy(&dso->lock);
	free(dso);
}

struct dso *dso__get(struct dso *dso)
{
	if (dso)
		atomic_inc(&dso->refcnt);
	return dso;
}

void dso__put(struct dso *dso)
{
	if (dso && atomic_dec_and_test(&dso->refcnt))
		dso__delete(dso);
}

void dso__set_build_id(struct dso *dso, void *build_id)
{
	memcpy(dso->build_id, build_id, sizeof(dso->build_id));
	dso->has_build_id = 1;
}

bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
{
	return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
}

void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
{
	char path[PATH_MAX];

	if (machine__is_default_guest(machine))
		return;
	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
	if (sysfs__read_build_id(path, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;
}

int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
{
	char filename[PATH_MAX];
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = dso->short_name + 1;

	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	if (sysfs__read_build_id(filename, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;

	return 0;
}

bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
{
	bool have_build_id = false;
	struct dso *pos;

	list_for_each_entry(pos, head, node) {
		if (with_hits && !pos->hit)
			continue;
		if (pos->has_build_id) {
			have_build_id = true;
			continue;
		}
		if (filename__read_build_id(pos->long_name, pos->build_id,
					    sizeof(pos->build_id)) > 0) {
			have_build_id	  = true;
			pos->has_build_id = true;
		}
	}

	return have_build_id;
}

void __dsos__add(struct dsos *dsos, struct dso *dso)
{
	list_add_tail(&dso->node, &dsos->head);
	__dso__findlink_by_longname(&dsos->root, dso, NULL);
	/*
	 * It is now in the linked list, so grab a reference. Memory can later
	 * be reclaimed by looking for LRU dso instances in the list with
	 * atomic_read(&dso->refcnt) == 1, i.e. no references anywhere besides
	 * the one held for the list itself: under a lock for the list, remove
	 * the dso from the list, then dso__put() it, which will probably be
	 * the last reference and thus call dso__delete(), end of life.
	 *
	 * That, or at the end of the 'struct machine' lifetime, when all
	 * 'struct dso' instances will be removed from the list, in
	 * dsos__exit(), if they have no other reference from some other data
	 * structure.
	 *
	 * E.g.: after processing a 'perf.data' file and storing references
	 * to objects instantiated while processing events, we will have
	 * references to the 'thread', 'map', 'dso' structs all from 'struct
	 * hist_entry' instances, but we may not need anything not referenced,
	 * so we might as well call machines__exit()/machines__delete() and
	 * garbage collect it.
	 */
	dso__get(dso);
}

void dsos__add(struct dsos *dsos, struct dso *dso)
{
	pthread_rwlock_wrlock(&dsos->lock);
	__dsos__add(dsos, dso);
	pthread_rwlock_unlock(&dsos->lock);
}

struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *pos;

	if (cmp_short) {
		list_for_each_entry(pos, &dsos->head, node)
			if (strcmp(pos->short_name, name) == 0)
				return pos;
		return NULL;
	}
	return __dso__find_by_longname(&dsos->root, name);
}

struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *dso;

	pthread_rwlock_rdlock(&dsos->lock);
	dso = __dsos__find(dsos, name, cmp_short);
	pthread_rwlock_unlock(&dsos->lock);
	return dso;
}

struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
{
	struct dso *dso = dso__new(name);

	if (dso != NULL) {
		__dsos__add(dsos, dso);
		dso__set_basename(dso);
	}
	return dso;
}

struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso = __dsos__find(dsos, name, false);

	return dso ? dso : __dsos__addnew(dsos, name);
}

struct dso *dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso;

	pthread_rwlock_wrlock(&dsos->lock);
	dso = dso__get(__dsos__findnew(dsos, name));
	pthread_rwlock_unlock(&dsos->lock);
	return dso;
}

size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
			       bool (skip)(struct dso *dso, int parm), int parm)
{
	struct dso *pos;
	size_t ret = 0;

	list_for_each_entry(pos, head, node) {
		if (skip && skip(pos, parm))
			continue;
		ret += dso__fprintf_buildid(pos, fp);
		ret += fprintf(fp, " %s\n", pos->long_name);
	}
	return ret;
}

size_t __dsos__fprintf(struct list_head *head, FILE *fp)
{
	struct dso *pos;
	size_t ret = 0;

	list_for_each_entry(pos, head, node) {
		int i;
		for (i = 0; i < MAP__NR_TYPES; ++i)
			ret += dso__fprintf(pos, i, fp);
	}

	return ret;
}

size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
	return fprintf(fp, "%s", sbuild_id);
}

size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);

	if (dso->short_name != dso->long_name)
		ret += fprintf(fp, "%s, ", dso->long_name);
	ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
		       dso__loaded(dso, type) ? "" : "NOT ");
	ret += dso__fprintf_buildid(dso, fp);
	ret += fprintf(fp, ")\n");
	for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
		ret += symbol__fprintf(pos, fp);
	}

	return ret;
}

enum dso_type dso__type(struct dso *dso, struct machine *machine)
{
	int fd;
	enum dso_type type = DSO__TYPE_UNKNOWN;

	fd = dso__data_get_fd(dso, machine);
	if (fd >= 0) {
		type = dso__type_fd(fd);
		dso__data_put_fd(dso);
	}

	return type;
}

int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
{
	int idx, errnum = dso->load_errno;
	/*
	 * This must have the same ordering as the enum dso_load_errno.
	 */
	static const char *dso_load__error_str[] = {
		"Internal tools/perf/ library error",
		"Invalid ELF file",
		"Can not read build id",
		"Mismatching build id",
		"Decompression failure",
	};

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		const char *err = strerror_r(errnum, buf, buflen);

		if (err != buf)
			scnprintf(buf, buflen, "%s", err);

		return 0;
	}

	if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
		return -1;

	idx = errnum - __DSO_LOAD_ERRNO__START;

	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
	return 0;
}