crash_dump.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672
  1. /*
  2. * S390 kdump implementation
  3. *
  4. * Copyright IBM Corp. 2011
  5. * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
  6. */
  7. #include <linux/crash_dump.h>
  8. #include <asm/lowcore.h>
  9. #include <linux/kernel.h>
  10. #include <linux/module.h>
  11. #include <linux/gfp.h>
  12. #include <linux/slab.h>
  13. #include <linux/bootmem.h>
  14. #include <linux/elf.h>
  15. #include <linux/memblock.h>
  16. #include <asm/os_info.h>
  17. #include <asm/elf.h>
  18. #include <asm/ipl.h>
  19. #include <asm/sclp.h>
  20. #define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
  21. #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
  22. #define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
  23. #define LINUX_NOTE_NAME "LINUX"
/*
 * Single-region memblock type describing the crashkernel area.
 * For kdump, elfcorehdr_alloc() fills oldmem_region with
 * [OLDMEM_BASE, OLDMEM_BASE + OLDMEM_SIZE); it is then passed as the
 * second type to for_each_mem_range() below.
 * NOTE(review): with this iterator the second type acts as the
 * exclude set — confirm against this tree's memblock implementation.
 */
static struct memblock_region oldmem_region;

static struct memblock_type oldmem_type = {
	.cnt = 1,
	.max = 1,
	.total_size = 0,
	.regions = &oldmem_region,
};

/* Per-CPU register save areas collected from the crashed kernel */
struct dump_save_areas dump_save_areas;
/*
 * Return physical address for virtual address
 *
 * Uses the LRA (load real address) instruction to translate @addr
 * through the current DAT tables. On translation success (condition
 * code 0) the jz keeps the translated address; otherwise real_addr
 * is forced to 0, so callers get a NULL-like result for addresses
 * that are not currently mapped.
 */
static inline void *load_real_addr(void *addr)
{
	unsigned long real_addr;

	asm volatile(
		" lra %0,0(%1)\n"
		" jz 0f\n"
		" la %0,0\n"
		"0:"
		: "=a" (real_addr) : "a" (addr) : "cc");
	return (void *)real_addr;
}
/*
 * Copy real to virtual or real memory
 *
 * @dest may be a linear-mapped kernel address (handled by a single
 * memcpy_real()) or a vmalloc/module address, which is not physically
 * contiguous and therefore must be translated and copied one page at
 * a time. Returns 0 on success, -EFAULT on copy failure.
 */
static int copy_from_realmem(void *dest, void *src, size_t count)
{
	unsigned long size;

	if (!count)
		return 0;
	if (!is_vmalloc_or_module_addr(dest))
		return memcpy_real(dest, src, count);
	do {
		/*
		 * Limit each chunk to the end of the current destination
		 * page. NOTE(review): the & ~PAGE_MASK only extracts the
		 * in-page offset, so applying __pa() to a vmalloc address
		 * here is harmless — confirm.
		 */
		size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK));
		/* translate the vmalloc page to its real address per chunk */
		if (memcpy_real(load_real_addr(dest), src, size))
			return -EFAULT;
		count -= size;
		dest += size;
		src += size;
	} while (count);
	return 0;
}
/*
 * Pointer to ELF header in new kernel
 *
 * Non-NULL once elfcorehdr_alloc() has built the header; used by the
 * read/free functions below to decide whether the header lives in the
 * new kernel's memory or in oldmem.
 */
static void *elfcorehdr_newmem;
  70. /*
  71. * Copy one page from zfcpdump "oldmem"
  72. *
  73. * For pages below HSA size memory from the HSA is copied. Otherwise
  74. * real memory copy is used.
  75. */
  76. static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize,
  77. unsigned long src, int userbuf)
  78. {
  79. int rc;
  80. if (src < sclp.hsa_size) {
  81. rc = memcpy_hsa(buf, src, csize, userbuf);
  82. } else {
  83. if (userbuf)
  84. rc = copy_to_user_real((void __force __user *) buf,
  85. (void *) src, csize);
  86. else
  87. rc = memcpy_real(buf, (void *) src, csize);
  88. }
  89. return rc ? rc : csize;
  90. }
  91. /*
  92. * Copy one page from kdump "oldmem"
  93. *
  94. * For the kdump reserved memory this functions performs a swap operation:
  95. * - [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0 - OLDMEM_SIZE].
  96. * - [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
  97. */
  98. static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
  99. unsigned long src, int userbuf)
  100. {
  101. int rc;
  102. if (src < OLDMEM_SIZE)
  103. src += OLDMEM_BASE;
  104. else if (src > OLDMEM_BASE &&
  105. src < OLDMEM_BASE + OLDMEM_SIZE)
  106. src -= OLDMEM_BASE;
  107. if (userbuf)
  108. rc = copy_to_user_real((void __force __user *) buf,
  109. (void *) src, csize);
  110. else
  111. rc = copy_from_realmem(buf, (void *) src, csize);
  112. return (rc == 0) ? rc : csize;
  113. }
  114. /*
  115. * Copy one page from "oldmem"
  116. */
  117. ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
  118. unsigned long offset, int userbuf)
  119. {
  120. unsigned long src;
  121. if (!csize)
  122. return 0;
  123. src = (pfn << PAGE_SHIFT) + offset;
  124. if (OLDMEM_BASE)
  125. return copy_oldmem_page_kdump(buf, csize, src, userbuf);
  126. else
  127. return copy_oldmem_page_zfcpdump(buf, csize, src, userbuf);
  128. }
/*
 * Remap "oldmem" for kdump
 *
 * For the kdump reserved memory this functions performs a swap operation:
 * [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
 */
static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
					unsigned long from, unsigned long pfn,
					unsigned long size, pgprot_t prot)
{
	unsigned long size_old;
	int rc;

	if (pfn < OLDMEM_SIZE >> PAGE_SHIFT) {
		/*
		 * The leading part of the range falls into the swapped
		 * region: map it shifted up by OLDMEM_BASE. size_old is
		 * in bytes, pfn in pages.
		 */
		size_old = min(size, OLDMEM_SIZE - (pfn << PAGE_SHIFT));
		rc = remap_pfn_range(vma, from,
				     pfn + (OLDMEM_BASE >> PAGE_SHIFT),
				     size_old, prot);
		if (rc || size == size_old)
			return rc;
		/* advance past the swapped part; the rest maps 1:1 */
		size -= size_old;
		from += size_old;
		pfn += size_old >> PAGE_SHIFT;
	}
	return remap_pfn_range(vma, from, pfn, size, prot);
}
/*
 * Remap "oldmem" for zfcpdump
 *
 * We only map available memory above HSA size. Memory below HSA size
 * is read on demand using the copy_oldmem_page() function.
 */
static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
					   unsigned long from,
					   unsigned long pfn,
					   unsigned long size, pgprot_t prot)
{
	unsigned long hsa_end = sclp.hsa_size;
	unsigned long size_hsa;

	if (pfn < hsa_end >> PAGE_SHIFT) {
		/*
		 * The HSA part is deliberately left unmapped (returns 0
		 * without calling remap_pfn_range); reads go through
		 * copy_oldmem_page() instead.
		 */
		size_hsa = min(size, hsa_end - (pfn << PAGE_SHIFT));
		if (size == size_hsa)
			return 0;
		size -= size_hsa;
		from += size_hsa;
		pfn += size_hsa >> PAGE_SHIFT;
	}
	return remap_pfn_range(vma, from, pfn, size, prot);
}
  177. /*
  178. * Remap "oldmem" for kdump or zfcpdump
  179. */
  180. int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
  181. unsigned long pfn, unsigned long size, pgprot_t prot)
  182. {
  183. if (OLDMEM_BASE)
  184. return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
  185. else
  186. return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
  187. prot);
  188. }
/*
 * Copy memory from old kernel
 *
 * Handles the mode-specific prefix of the source range first (kdump:
 * the swapped [0, OLDMEM_SIZE) window; zfcpdump: memory below the HSA
 * boundary, fetched via SCLP), then copies any remainder from plain
 * real memory. Returns 0 on success or a copy error code.
 */
int copy_from_oldmem(void *dest, void *src, size_t count)
{
	unsigned long copied = 0;
	int rc;

	if (OLDMEM_BASE) {
		/* kdump: old [0, OLDMEM_SIZE) lives at OLDMEM_BASE */
		if ((unsigned long) src < OLDMEM_SIZE) {
			copied = min(count, OLDMEM_SIZE - (unsigned long) src);
			rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied);
			if (rc)
				return rc;
		}
	} else {
		unsigned long hsa_end = sclp.hsa_size;
		/* zfcpdump: memory below hsa_end is only in the HSA */
		if ((unsigned long) src < hsa_end) {
			copied = min(count, hsa_end - (unsigned long) src);
			rc = memcpy_hsa(dest, (unsigned long) src, copied, 0);
			if (rc)
				return rc;
		}
	}
	/* rest of the range (possibly all of it) is ordinary real memory */
	return copy_from_realmem(dest + copied, src + copied, count - copied);
}
  214. /*
  215. * Alloc memory and panic in case of ENOMEM
  216. */
  217. static void *kzalloc_panic(int len)
  218. {
  219. void *rc;
  220. rc = kzalloc(len, GFP_KERNEL);
  221. if (!rc)
  222. panic("s390 kdump kzalloc (%d) failed", len);
  223. return rc;
  224. }
/*
 * Initialize ELF note
 *
 * Writes an Elf64_Nhdr at @buf followed by the NUL-terminated @name
 * (padded to a 4-byte boundary) and the @d_len descriptor bytes from
 * @desc (also padded to 4 bytes, per the ELF note format). Returns a
 * pointer just past the emitted note, suitable for chaining.
 */
static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len,
		     const char *name)
{
	Elf64_Nhdr *note;
	u64 len;

	note = (Elf64_Nhdr *)buf;
	note->n_namesz = strlen(name) + 1;
	note->n_descsz = d_len;
	note->n_type = type;
	len = sizeof(Elf64_Nhdr);
	/* name immediately follows the header */
	memcpy(buf + len, name, note->n_namesz);
	len = roundup(len + note->n_namesz, 4);
	/* descriptor starts at the next 4-byte boundary */
	memcpy(buf + len, desc, note->n_descsz);
	len = roundup(len + note->n_descsz, 4);
	return PTR_ADD(buf, len);
}
/*
 * Initialize prstatus note
 *
 * Builds an NT_PRSTATUS ("CORE") note from the general purpose
 * registers, PSW and access registers of one save area. pr_pid is
 * (ab)used as a CPU number: the static counter persists across
 * calls, so successive CPUs get 1, 2, 3, ...
 */
static void *nt_prstatus(void *ptr, struct save_area *sa)
{
	struct elf_prstatus nt_prstatus;
	static int cpu_nr = 1;	/* numbering starts at 1, not 0 */

	memset(&nt_prstatus, 0, sizeof(nt_prstatus));
	memcpy(&nt_prstatus.pr_reg.gprs, sa->gp_regs, sizeof(sa->gp_regs));
	memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw));
	memcpy(&nt_prstatus.pr_reg.acrs, sa->acc_regs, sizeof(sa->acc_regs));
	nt_prstatus.pr_pid = cpu_nr;
	cpu_nr++;
	return nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus),
		       "CORE");
}
/*
 * Initialize fpregset (floating point) note
 *
 * Builds an NT_PRFPREG ("CORE") note with the floating point control
 * register and the 16 floating point registers from the save area.
 */
static void *nt_fpregset(void *ptr, struct save_area *sa)
{
	elf_fpregset_t nt_fpregset;

	memset(&nt_fpregset, 0, sizeof(nt_fpregset));
	memcpy(&nt_fpregset.fpc, &sa->fp_ctrl_reg, sizeof(sa->fp_ctrl_reg));
	memcpy(&nt_fpregset.fprs, &sa->fp_regs, sizeof(sa->fp_regs));
	return nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset),
		       "CORE");
}
  272. /*
  273. * Initialize timer note
  274. */
  275. static void *nt_s390_timer(void *ptr, struct save_area *sa)
  276. {
  277. return nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer),
  278. LINUX_NOTE_NAME);
  279. }
  280. /*
  281. * Initialize TOD clock comparator note
  282. */
  283. static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa)
  284. {
  285. return nt_init(ptr, NT_S390_TODCMP, &sa->clk_cmp,
  286. sizeof(sa->clk_cmp), LINUX_NOTE_NAME);
  287. }
  288. /*
  289. * Initialize TOD programmable register note
  290. */
  291. static void *nt_s390_tod_preg(void *ptr, struct save_area *sa)
  292. {
  293. return nt_init(ptr, NT_S390_TODPREG, &sa->tod_reg,
  294. sizeof(sa->tod_reg), LINUX_NOTE_NAME);
  295. }
  296. /*
  297. * Initialize control register note
  298. */
  299. static void *nt_s390_ctrs(void *ptr, struct save_area *sa)
  300. {
  301. return nt_init(ptr, NT_S390_CTRS, &sa->ctrl_regs,
  302. sizeof(sa->ctrl_regs), LINUX_NOTE_NAME);
  303. }
  304. /*
  305. * Initialize prefix register note
  306. */
  307. static void *nt_s390_prefix(void *ptr, struct save_area *sa)
  308. {
  309. return nt_init(ptr, NT_S390_PREFIX, &sa->pref_reg,
  310. sizeof(sa->pref_reg), LINUX_NOTE_NAME);
  311. }
  312. /*
  313. * Initialize vxrs high note (full 128 bit VX registers 16-31)
  314. */
  315. static void *nt_s390_vx_high(void *ptr, __vector128 *vx_regs)
  316. {
  317. return nt_init(ptr, NT_S390_VXRS_HIGH, &vx_regs[16],
  318. 16 * sizeof(__vector128), LINUX_NOTE_NAME);
  319. }
/*
 * Initialize vxrs low note (lower halves of VX registers 0-15)
 *
 * Hand-rolled instead of using nt_init() because only the low 8 bytes
 * of each 16-byte register are emitted, so the descriptor cannot be
 * copied from a contiguous buffer. Layout matches the ELF note format:
 * header, 4-byte padded name, then 16 * 8 descriptor bytes.
 */
static void *nt_s390_vx_low(void *ptr, __vector128 *vx_regs)
{
	Elf64_Nhdr *note;
	u64 len;
	int i;

	note = (Elf64_Nhdr *)ptr;
	note->n_namesz = strlen(LINUX_NOTE_NAME) + 1;
	note->n_descsz = 16 * 8;	/* 16 registers, low 8 bytes each */
	note->n_type = NT_S390_VXRS_LOW;
	len = sizeof(Elf64_Nhdr);
	memcpy(ptr + len, LINUX_NOTE_NAME, note->n_namesz);
	len = roundup(len + note->n_namesz, 4);
	ptr += len;
	/* Copy lower halves of SIMD registers 0-15 */
	for (i = 0; i < 16; i++) {
		/*
		 * u[2] is the third 32-bit word — on big-endian s390 the
		 * start of the low 64 bits of the register.
		 */
		memcpy(ptr, &vx_regs[i].u[2], 8);
		ptr += 8;
	}
	return ptr;
}
/*
 * Fill ELF notes for one CPU with save area registers
 *
 * Emits the full set of per-CPU notes starting at @ptr and returns a
 * pointer just past the last one. NT_PRSTATUS must come first so that
 * tools such as crash/gdb associate the following notes with the same
 * CPU. Vector register notes are only emitted when the machine has
 * the vector facility and a VX save buffer was collected.
 */
void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vx_regs)
{
	ptr = nt_prstatus(ptr, sa);
	ptr = nt_fpregset(ptr, sa);
	ptr = nt_s390_timer(ptr, sa);
	ptr = nt_s390_tod_cmp(ptr, sa);
	ptr = nt_s390_tod_preg(ptr, sa);
	ptr = nt_s390_ctrs(ptr, sa);
	ptr = nt_s390_prefix(ptr, sa);
	if (MACHINE_HAS_VX && vx_regs) {
		ptr = nt_s390_vx_low(ptr, vx_regs);
		ptr = nt_s390_vx_high(ptr, vx_regs);
	}
	return ptr;
}
/*
 * Initialize prpsinfo note (new kernel)
 *
 * Emits a minimal NT_PRPSINFO note: state 'R' (running) and the fixed
 * process name "vmlinux", which is what dump analysis tools expect
 * for a kernel core.
 */
static void *nt_prpsinfo(void *ptr)
{
	struct elf_prpsinfo prpsinfo;

	memset(&prpsinfo, 0, sizeof(prpsinfo));
	prpsinfo.pr_sname = 'R';
	strcpy(prpsinfo.pr_fname, "vmlinux");
	return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo),
		       KEXEC_CORE_NOTE_NAME);
}
  373. /*
  374. * Get vmcoreinfo using lowcore->vmcore_info (new kernel)
  375. */
  376. static void *get_vmcoreinfo_old(unsigned long *size)
  377. {
  378. char nt_name[11], *vmcoreinfo;
  379. Elf64_Nhdr note;
  380. void *addr;
  381. if (copy_from_oldmem(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
  382. return NULL;
  383. memset(nt_name, 0, sizeof(nt_name));
  384. if (copy_from_oldmem(&note, addr, sizeof(note)))
  385. return NULL;
  386. if (copy_from_oldmem(nt_name, addr + sizeof(note), sizeof(nt_name) - 1))
  387. return NULL;
  388. if (strcmp(nt_name, "VMCOREINFO") != 0)
  389. return NULL;
  390. vmcoreinfo = kzalloc_panic(note.n_descsz);
  391. if (copy_from_oldmem(vmcoreinfo, addr + 24, note.n_descsz))
  392. return NULL;
  393. *size = note.n_descsz;
  394. return vmcoreinfo;
  395. }
  396. /*
  397. * Initialize vmcoreinfo note (new kernel)
  398. */
  399. static void *nt_vmcoreinfo(void *ptr)
  400. {
  401. unsigned long size;
  402. void *vmcoreinfo;
  403. vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
  404. if (!vmcoreinfo)
  405. vmcoreinfo = get_vmcoreinfo_old(&size);
  406. if (!vmcoreinfo)
  407. return ptr;
  408. return nt_init(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
  409. }
  410. /*
  411. * Initialize final note (needed for /proc/vmcore code)
  412. */
  413. static void *nt_final(void *ptr)
  414. {
  415. Elf64_Nhdr *note;
  416. note = (Elf64_Nhdr *) ptr;
  417. note->n_namesz = 0;
  418. note->n_descsz = 0;
  419. note->n_type = 0;
  420. return PTR_ADD(ptr, sizeof(Elf64_Nhdr));
  421. }
  422. /*
  423. * Initialize ELF header (new kernel)
  424. */
  425. static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
  426. {
  427. memset(ehdr, 0, sizeof(*ehdr));
  428. memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
  429. ehdr->e_ident[EI_CLASS] = ELFCLASS64;
  430. ehdr->e_ident[EI_DATA] = ELFDATA2MSB;
  431. ehdr->e_ident[EI_VERSION] = EV_CURRENT;
  432. memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
  433. ehdr->e_type = ET_CORE;
  434. ehdr->e_machine = EM_S390;
  435. ehdr->e_version = EV_CURRENT;
  436. ehdr->e_phoff = sizeof(Elf64_Ehdr);
  437. ehdr->e_ehsize = sizeof(Elf64_Ehdr);
  438. ehdr->e_phentsize = sizeof(Elf64_Phdr);
  439. ehdr->e_phnum = mem_chunk_cnt + 1;
  440. return ehdr + 1;
  441. }
  442. /*
  443. * Return CPU count for ELF header (new kernel)
  444. */
  445. static int get_cpu_cnt(void)
  446. {
  447. int i, cpus = 0;
  448. for (i = 0; i < dump_save_areas.count; i++) {
  449. if (dump_save_areas.areas[i]->sa.pref_reg == 0)
  450. continue;
  451. cpus++;
  452. }
  453. return cpus;
  454. }
/*
 * Return memory chunk count for ELF header (new kernel)
 *
 * Walks memblock.physmem with oldmem_type as the second iterator
 * argument (set up in elfcorehdr_alloc() for kdump) and counts the
 * resulting ranges; this determines how many PT_LOAD headers are
 * needed.
 */
static int get_mem_chunk_cnt(void)
{
	int cnt = 0;
	u64 idx;

	for_each_mem_range(idx, &memblock.physmem, &oldmem_type, NUMA_NO_NODE,
			   MEMBLOCK_NONE, NULL, NULL, NULL)
		cnt++;
	return cnt;
}
/*
 * Initialize ELF loads (new kernel)
 *
 * Writes one PT_LOAD header per memory range, identity-mapping file
 * offset, virtual and physical address to the chunk's start address.
 * @phdr must point at an array of at least get_mem_chunk_cnt()
 * entries. NOTE(review): @loads_offset is unused here — presumably
 * kept for interface symmetry with notes_init(); confirm.
 */
static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
{
	phys_addr_t start, end;
	u64 idx;

	for_each_mem_range(idx, &memblock.physmem, &oldmem_type, NUMA_NO_NODE,
			   MEMBLOCK_NONE, &start, &end, NULL) {
		phdr->p_filesz = end - start;
		phdr->p_type = PT_LOAD;
		/* /proc/vmcore reads PT_LOAD data straight from oldmem */
		phdr->p_offset = start;
		phdr->p_vaddr = start;
		phdr->p_paddr = start;
		phdr->p_memsz = end - start;
		phdr->p_flags = PF_R | PF_W | PF_X;
		phdr->p_align = PAGE_SIZE;
		phdr++;
	}
}
/*
 * Initialize notes (new kernel)
 *
 * Writes the PT_NOTE payload at @ptr: one prpsinfo note, per-CPU
 * register notes for every CPU with a filled-in save area, the
 * VMCOREINFO note (if recoverable) and the terminating empty note.
 * Fills @phdr with the payload's offset and size and returns a
 * pointer just past the payload.
 */
static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
{
	struct save_area_ext *sa_ext;
	void *ptr_start = ptr;
	int i;

	ptr = nt_prpsinfo(ptr);
	for (i = 0; i < dump_save_areas.count; i++) {
		sa_ext = dump_save_areas.areas[i];
		/* zero prefix register: save area was never collected */
		if (sa_ext->sa.pref_reg == 0)
			continue;
		ptr = fill_cpu_elf_notes(ptr, &sa_ext->sa, sa_ext->vx_regs);
	}
	ptr = nt_vmcoreinfo(ptr);
	ptr = nt_final(ptr);
	memset(phdr, 0, sizeof(*phdr));
	phdr->p_type = PT_NOTE;
	phdr->p_offset = notes_offset;
	phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start);
	phdr->p_memsz = phdr->p_filesz;
	return ptr;
}
/*
 * Create ELF core header (new kernel)
 *
 * Allocates and fills the complete ELF core header (ELF header,
 * program headers, notes) for /proc/vmcore. Stores its address and
 * the header size (up to and including the notes) in *addr / *size.
 * Returns 0 on success or when no header is needed, -ENODEV when
 * zfcpdump cannot determine the HSA size.
 */
int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	Elf64_Phdr *phdr_notes, *phdr_loads;
	int mem_chunk_cnt;
	void *ptr, *hdr;
	u32 alloc_size;
	u64 hdr_off;

	/* If we are not in kdump or zfcpdump mode return */
	if (!OLDMEM_BASE && ipl_info.type != IPL_TYPE_FCP_DUMP)
		return 0;
	/* If elfcorehdr= has been passed via cmdline, we use that one */
	if (elfcorehdr_addr != ELFCORE_ADDR_MAX)
		return 0;
	/* If we cannot get HSA size for zfcpdump return error */
	if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp.hsa_size)
		return -ENODEV;
	/* For kdump, exclude previous crashkernel memory */
	if (OLDMEM_BASE) {
		oldmem_region.base = OLDMEM_BASE;
		oldmem_region.size = OLDMEM_SIZE;
		oldmem_type.total_size = OLDMEM_SIZE;
	}
	mem_chunk_cnt = get_mem_chunk_cnt();
	/*
	 * Size estimate: 0x1000 for ELF header + fixed notes, 0x4a0 per
	 * CPU for the register notes — NOTE(review): confirm these
	 * bounds still hold when note contents change; the BUG_ON at
	 * the end catches overruns after the fact.
	 */
	alloc_size = 0x1000 + get_cpu_cnt() * 0x4a0 +
		     mem_chunk_cnt * sizeof(Elf64_Phdr);
	hdr = kzalloc_panic(alloc_size);
	/* Init elf header */
	ptr = ehdr_init(hdr, mem_chunk_cnt);
	/* Init program headers */
	phdr_notes = ptr;
	ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
	phdr_loads = ptr;
	ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
	/* Init notes */
	hdr_off = PTR_DIFF(ptr, hdr);
	ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
	/* Init loads */
	hdr_off = PTR_DIFF(ptr, hdr);
	loads_init(phdr_loads, hdr_off);
	*addr = (unsigned long long) hdr;
	elfcorehdr_newmem = hdr;
	*size = (unsigned long long) hdr_off;
	BUG_ON(elfcorehdr_size > alloc_size);
	return 0;
}
  559. /*
  560. * Free ELF core header (new kernel)
  561. */
  562. void elfcorehdr_free(unsigned long long addr)
  563. {
  564. if (!elfcorehdr_newmem)
  565. return;
  566. kfree((void *)(unsigned long)addr);
  567. }
  568. /*
  569. * Read from ELF header
  570. */
  571. ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
  572. {
  573. void *src = (void *)(unsigned long)*ppos;
  574. src = elfcorehdr_newmem ? src : src - OLDMEM_BASE;
  575. memcpy(buf, src, count);
  576. *ppos += count;
  577. return count;
  578. }
  579. /*
  580. * Read from ELF notes data
  581. */
  582. ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
  583. {
  584. void *src = (void *)(unsigned long)*ppos;
  585. int rc;
  586. if (elfcorehdr_newmem) {
  587. memcpy(buf, src, count);
  588. } else {
  589. rc = copy_from_oldmem(buf, src, count);
  590. if (rc)
  591. return rc;
  592. }
  593. *ppos += count;
  594. return count;
  595. }