/*
 * Copyright (C) 1991,1992 Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */
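/*
 * For reference: the PT_* offsets used throughout this file (PT_EBX,
 * PT_EAX, PT_EIP, PT_OLDESP, ...) are generated from struct pt_regs by
 * asm-offsets and follow the layout above, e.g. PT_EBX == 0x00,
 * PT_EAX == 0x18, PT_ORIG_EAX == 0x2C and PT_EIP == 0x30.
 */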
#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/nospec-branch.h>

        .section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */
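/*
 * Note: on a native (non-paravirt) build these macros collapse to the bare
 * instructions named above -- e.g. DISABLE_INTERRUPTS(CLBR_ANY) is just
 * "cli" and INTERRUPT_RETURN is just "iret"; the CLBR_* argument only
 * matters when a paravirt backend patches in a replacement.
 */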
#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
        testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off?
        jz 1f
        TRACE_IRQS_ON
1:
#endif
.endm

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and the kernel only uses it for the stack
 * canary, which is required to be at %gs:20 by gcc. Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
#ifdef CONFIG_X86_32_LAZY_GS

/* unfortunately push/pop can't be no-op */
.macro PUSH_GS
        pushl $0
.endm
.macro POP_GS pop=0
        addl $(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

/* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else /* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
        pushl %gs
.endm

.macro POP_GS pop=0
98:     popl %gs
        .if \pop <> 0
        add $\pop, %esp
        .endif
.endm
.macro POP_GS_EX
        .pushsection .fixup, "ax"
99:     movl $0, (%esp)
        jmp 98b
        .popsection
        _ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:     mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
        .pushsection .fixup, "ax"
99:     movl $0, PT_GS(%esp)
        jmp 98b
        .popsection
        _ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
        movl %gs, \reg
.endm
.macro REG_TO_PTGS reg
        movl \reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
        movl $(__KERNEL_STACK_CANARY), \reg
        movl \reg, %gs
.endm

#endif /* CONFIG_X86_32_LAZY_GS */
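/*
 * Note: CONFIG_X86_32_LAZY_GS is only available when the stack protector
 * is off; in that case the user's %gs is left alone on kernel entry and
 * handled lazily at context switch, so the macros above merely keep the
 * pt_regs slot consistent (PUSH_GS pushes 0). With stack protector on,
 * %gs is switched to __KERNEL_STACK_CANARY on every entry so the %gs:20
 * canary (see the comment above) is always reachable.
 */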
.macro SAVE_ALL pt_regs_ax=%eax
        cld
        PUSH_GS
        pushl %fs
        pushl %es
        pushl %ds
        pushl \pt_regs_ax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        pushl %ecx
        pushl %ebx
        movl $(__USER_DS), %edx
        movl %edx, %ds
        movl %edx, %es
        movl $(__KERNEL_PERCPU), %edx
        movl %edx, %fs
        SET_KERNEL_GS %edx
.endm
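/*
 * After SAVE_ALL the frame matches struct pt_regs: %esp points at PT_EBX,
 * with orig_eax (pushed by the entry stub) and the hardware iret frame
 * sitting directly above the saved segment registers, exactly as in the
 * layout comment at the top of this file.
 */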
.macro RESTORE_INT_REGS
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
.endm

.macro RESTORE_REGS pop=0
        RESTORE_INT_REGS
1:      popl %ds
2:      popl %es
3:      popl %fs
        POP_GS \pop
        .pushsection .fixup, "ax"
4:      movl $0, (%esp)
        jmp 1b
5:      movl $0, (%esp)
        jmp 2b
6:      movl $0, (%esp)
        jmp 3b
        .popsection
        _ASM_EXTABLE(1b, 4b)
        _ASM_EXTABLE(2b, 5b)
        _ASM_EXTABLE(3b, 6b)
        POP_GS_EX
.endm
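/*
 * Note: the .fixup entries above cover a segment pop faulting (e.g. on a
 * selector made invalid by an LDT change): the exception table redirects
 * the fault to the fixup, which overwrites the bad value on the stack
 * with 0 and retries the pop, so the register silently ends up holding
 * the null selector instead of the kernel oopsing.
 */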
ENTRY(ret_from_fork)
        pushl %eax
        call schedule_tail
        GET_THREAD_INFO(%ebp)
        popl %eax
        pushl $0x0202 # Reset kernel eflags
        popfl

        /* When we fork, we trace the syscall return in the child, too. */
        movl %esp, %eax
        call syscall_return_slowpath
        jmp restore_all
END(ret_from_fork)

ENTRY(ret_from_kernel_thread)
        pushl %eax
        call schedule_tail
        GET_THREAD_INFO(%ebp)
        popl %eax
        pushl $0x0202 # Reset kernel eflags
        popfl
        movl PT_EBP(%esp), %eax
        movl PT_EBX(%esp), %edx
        CALL_NOSPEC %edx
        movl $0, PT_EAX(%esp)

        /*
         * Kernel threads return to userspace as if returning from a syscall.
         * We should check whether anything actually uses this path and, if so,
         * consider switching it over to ret_from_fork.
         */
        movl %esp, %eax
        call syscall_return_slowpath
        jmp restore_all
ENDPROC(ret_from_kernel_thread)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

# userspace resumption stub bypassing syscall exit tracing
        ALIGN
ret_from_exception:
        preempt_stop(CLBR_ANY)
ret_from_intr:
        GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86
        movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
        movb PT_CS(%esp), %al
        andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
        /*
         * We can be coming here from a child spawned by kernel_thread().
         */
        movl PT_CS(%esp), %eax
        andl $SEGMENT_RPL_MASK, %eax
#endif
        cmpl $USER_RPL, %eax
        jb resume_kernel # not returning to v8086 or userspace

ENTRY(resume_userspace)
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        movl %esp, %eax
        call prepare_exit_to_usermode
        jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
        DISABLE_INTERRUPTS(CLBR_ANY)
need_resched:
        cmpl $0, PER_CPU_VAR(__preempt_count)
        jnz restore_all
        testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
        jz restore_all
        call preempt_schedule_irq
        jmp need_resched
END(resume_kernel)
#endif

# SYSENTER call handler stub
ENTRY(entry_SYSENTER_32)
        movl TSS_sysenter_sp0(%esp), %esp
sysenter_past_esp:
        pushl $__USER_DS /* pt_regs->ss */
        pushl %ebp /* pt_regs->sp (stashed in bp) */
        pushfl /* pt_regs->flags (except IF = 0) */
        orl $X86_EFLAGS_IF, (%esp) /* Fix IF */
        pushl $__USER_CS /* pt_regs->cs */
        pushl $0 /* pt_regs->ip = 0 (placeholder) */
        pushl %eax /* pt_regs->orig_ax */
        SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */

        /*
         * User mode is traced as though IRQs are on, and SYSENTER
         * turned them off.
         */
        TRACE_IRQS_OFF

        movl %esp, %eax
        call do_fast_syscall_32
        /* XEN PV guests always use IRET path */
        ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
                    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

/* Opportunistic SYSEXIT */
        TRACE_IRQS_ON /* User mode traces as IRQs on. */
        movl PT_EIP(%esp), %edx /* pt_regs->ip */
        movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */
1:      mov PT_FS(%esp), %fs
        PTGS_TO_GS
        popl %ebx /* pt_regs->bx */
        addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */
        popl %esi /* pt_regs->si */
        popl %edi /* pt_regs->di */
        popl %ebp /* pt_regs->bp */
        popl %eax /* pt_regs->ax */

        /*
         * Return to the vDSO, which will pop ecx and edx.
         * Don't bother with DS and ES (they already contain __USER_DS).
         */
        ENABLE_INTERRUPTS_SYSEXIT

        .pushsection .fixup, "ax"
2:      movl $0, PT_FS(%esp)
        jmp 1b
        .popsection
        _ASM_EXTABLE(1b, 2b)
        PTGS_TO_GS_EX
ENDPROC(entry_SYSENTER_32)
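/*
 * For reference: SYSEXIT resumes user mode at %edx with the user stack
 * taken from %ecx, which is why the opportunistic exit above loads
 * pt_regs->ip into %edx and pt_regs->sp into %ecx before
 * ENABLE_INTERRUPTS_SYSEXIT; the vDSO then pops the real user %ecx and
 * %edx that this convention clobbers.
 */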
# system call handler stub
ENTRY(entry_INT80_32)
        ASM_CLAC
        pushl %eax /* pt_regs->orig_ax */
        SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */

        /*
         * User mode is traced as though IRQs are on. Unlike the 64-bit
         * case, INT80 is a trap gate on 32-bit kernels, so interrupts
         * are already on (unless user code is messing around with iopl).
         */

        movl %esp, %eax
        call do_syscall_32_irqs_on
.Lsyscall_32_done:

restore_all:
        TRACE_IRQS_IRET
restore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
        movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
        /*
         * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
         * are returning to the kernel.
         * See comments in process.c:copy_thread() for details.
         */
        movb PT_OLDSS(%esp), %ah
        movb PT_CS(%esp), %al
        andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
        cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
        je ldt_ss # returning to user-space with LDT SS
#endif
restore_nocheck:
        RESTORE_REGS 4 # skip orig_eax/error_code
irq_return:
        INTERRUPT_RETURN

.section .fixup, "ax"
ENTRY(iret_exc)
        pushl $0 # no error code
        pushl $do_iret_error
        jmp error_code
.previous
        _ASM_EXTABLE(irq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
ldt_ss:
#ifdef CONFIG_PARAVIRT
        /*
         * The kernel can't run on a non-flat stack if paravirt mode
         * is active. Rather than try to fix up the high bits of
         * ESP, bypass this code entirely. This may break DOSemu
         * and/or Wine support in a paravirt VM, although the option
         * is still available to implement the setting of the high
         * 16 bits in the INTERRUPT_RETURN paravirt-op.
         */
        cmpl $0, pv_info+PARAVIRT_enabled
        jne restore_nocheck
#endif

        /*
         * Setup and switch to ESPFIX stack
         *
         * We're returning to userspace with a 16-bit stack. The CPU will not
         * restore the high word of ESP for us on executing iret... This is an
         * "official" bug of all the x86-compatible CPUs, which we can work
         * around to make dosemu and wine happy. We do this by preloading the
         * high word of ESP with the high word of the userspace ESP while
         * compensating for the offset by changing to the ESPFIX segment with
         * a base address that accounts for the difference.
         */
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
        mov %esp, %edx /* load kernel esp */
        mov PT_OLDESP(%esp), %eax /* load userspace esp */
        mov %dx, %ax /* eax: new kernel esp */
        sub %eax, %edx /* offset (low word is 0) */
        shr $16, %edx
        mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
        mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
        pushl $__ESPFIX_SS
        pushl %eax /* new kernel esp */
        /*
         * Disable interrupts, but do not irqtrace this section: we
         * will soon execute iret and the tracer was already set to
         * the irqstate after the IRET:
         */
        DISABLE_INTERRUPTS(CLBR_EAX)
        lss (%esp), %esp /* switch to espfix segment */
        jmp restore_nocheck
#endif
ENDPROC(entry_INT80_32)
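/*
 * ESPFIX arithmetic, spelled out: above, %eax ends up holding a "new"
 * esp whose low 16 bits come from the kernel esp and whose high 16 bits
 * come from the user esp, while %edx holds the 64K-multiple difference
 * (kernel esp - new esp). That difference is patched into bits 16..31 of
 * the GDT_ESPFIX_SS base, so __ESPFIX_SS:%eax aliases the same linear
 * addresses as the flat kernel stack; when iret later restores only the
 * low 16 bits of ESP, the high word already matches the user's.
 */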
.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
        /* fixup the stack */
        mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
        mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
        shl $16, %eax
        addl %esp, %eax /* the adjusted stack pointer */
        pushl $__KERNEL_DS
        pushl %eax
        lss (%esp), %esp /* switch to the normal stack segment */
#endif
.endm

.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
        movl %ss, %eax
        /* see if on espfix stack */
        cmpw $__ESPFIX_SS, %ax
        jne 27f
        movl $__KERNEL_DS, %eax
        movl %eax, %ds
        movl %eax, %es
        /* switch to normal stack */
        FIXUP_ESPFIX_STACK
27:
#endif
.endm
/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
        .align 8
ENTRY(irq_entries_start)
        vector=FIRST_EXTERNAL_VECTOR
        .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
        pushl $(~vector+0x80) /* Note: always in signed byte range */
        vector=vector+1
        jmp common_interrupt
        .align 8
        .endr
END(irq_entries_start)

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
        .p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
        ASM_CLAC
        addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
        SAVE_ALL
        TRACE_IRQS_OFF
        movl %esp, %eax
        call do_IRQ
        jmp ret_from_intr
ENDPROC(common_interrupt)
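/*
 * Worked example of the vector encoding: for vector 0x20 the stub above
 * pushes $(~0x20 + 0x80) = 0x5f, which fits in a signed byte so every
 * stub stays inside its 8-byte slot; common_interrupt then adds -0x80,
 * leaving ~0x20 = -33 in orig_eax, and do_IRQ undoes the complement to
 * recover the vector number.
 */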
#define BUILD_INTERRUPT3(name, nr, fn)  \
ENTRY(name)                             \
        ASM_CLAC;                       \
        pushl $~(nr);                   \
        SAVE_ALL;                       \
        TRACE_IRQS_OFF                  \
        movl %esp, %eax;                \
        call fn;                        \
        jmp ret_from_intr;              \
ENDPROC(name)

#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
# define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr)               \
        BUILD_INTERRUPT3(name, nr, smp_##name); \
        TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>
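/*
 * For example (assuming the usual entry_arch.h contents), something like
 * BUILD_INTERRUPT(reschedule_interrupt, RESCHEDULE_VECTOR) expands to an
 * ENTRY(reschedule_interrupt) stub that pushes $~(RESCHEDULE_VECTOR),
 * saves registers and calls smp_reschedule_interrupt with the pt_regs
 * pointer in %eax, plus a trace_reschedule_interrupt variant when
 * CONFIG_TRACING is enabled.
 */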
ENTRY(coprocessor_error)
        ASM_CLAC
        pushl $0
        pushl $do_coprocessor_error
        jmp error_code
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
        ASM_CLAC
        pushl $0
#ifdef CONFIG_X86_INVD_BUG
        /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
        ALTERNATIVE "pushl $do_general_protection", \
                    "pushl $do_simd_coprocessor_error", \
                    X86_FEATURE_XMM
#else
        pushl $do_simd_coprocessor_error
#endif
        jmp error_code
END(simd_coprocessor_error)

ENTRY(device_not_available)
        ASM_CLAC
        pushl $-1 # mark this as an int
        pushl $do_device_not_available
        jmp error_code
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
        iret
        _ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)

ENTRY(native_irq_enable_sysexit)
        sti
        sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
        ASM_CLAC
        pushl $0
        pushl $do_overflow
        jmp error_code
END(overflow)

ENTRY(bounds)
        ASM_CLAC
        pushl $0
        pushl $do_bounds
        jmp error_code
END(bounds)

ENTRY(invalid_op)
        ASM_CLAC
        pushl $0
        pushl $do_invalid_op
        jmp error_code
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
        ASM_CLAC
        pushl $0
        pushl $do_coprocessor_segment_overrun
        jmp error_code
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
        ASM_CLAC
        pushl $do_invalid_TSS
        jmp error_code
END(invalid_TSS)

ENTRY(segment_not_present)
        ASM_CLAC
        pushl $do_segment_not_present
        jmp error_code
END(segment_not_present)

ENTRY(stack_segment)
        ASM_CLAC
        pushl $do_stack_segment
        jmp error_code
END(stack_segment)

ENTRY(alignment_check)
        ASM_CLAC
        pushl $do_alignment_check
        jmp error_code
END(alignment_check)

ENTRY(divide_error)
        ASM_CLAC
        pushl $0 # no error code
        pushl $do_divide_error
        jmp error_code
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
        ASM_CLAC
        pushl $0
        pushl machine_check_vector
        jmp error_code
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
        ASM_CLAC
        pushl $0
        pushl $do_spurious_interrupt_bug
        jmp error_code
END(spurious_interrupt_bug)
#ifdef CONFIG_XEN
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
        addl $5*4, %esp /* remove xen-provided frame */
        jmp sysenter_past_esp

ENTRY(xen_hypervisor_callback)
        pushl $-1 /* orig_ax = -1 => not a system call */
        SAVE_ALL
        TRACE_IRQS_OFF

        /*
         * Check to see if we got the event in the critical
         * region in xen_iret_direct, after we've reenabled
         * events and checked for pending events. This simulates
         * the iret instruction's behaviour where it delivers a
         * pending interrupt when enabling interrupts:
         */
        movl PT_EIP(%esp), %eax
        cmpl $xen_iret_start_crit, %eax
        jb 1f
        cmpl $xen_iret_end_crit, %eax
        jae 1f
        jmp xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:      mov %esp, %eax
        call xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
        call xen_maybe_preempt_hcall
#endif
        jmp ret_from_intr
ENDPROC(xen_hypervisor_callback)

/*
 * The hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
        pushl %eax
        movl $1, %eax
1:      mov 4(%esp), %ds
2:      mov 8(%esp), %es
3:      mov 12(%esp), %fs
4:      mov 16(%esp), %gs
        /*
         * EAX == 0 => Category 1 (Bad segment)
         * EAX != 0 => Category 2 (Bad IRET)
         */
        testl %eax, %eax
        popl %eax
        lea 16(%esp), %esp
        jz 5f
        jmp iret_exc
5:      pushl $-1 /* orig_ax = -1 => not a system call */
        SAVE_ALL
        jmp ret_from_exception

.section .fixup, "ax"
6:      xorl %eax, %eax
        movl %eax, 4(%esp)
        jmp 1b
7:      xorl %eax, %eax
        movl %eax, 8(%esp)
        jmp 2b
8:      xorl %eax, %eax
        movl %eax, 12(%esp)
        jmp 3b
9:      xorl %eax, %eax
        movl %eax, 16(%esp)
        jmp 4b
.previous
        _ASM_EXTABLE(1b, 6b)
        _ASM_EXTABLE(2b, 7b)
        _ASM_EXTABLE(3b, 8b)
        _ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
                 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
                 hyperv_vector_handler)

#endif /* CONFIG_HYPERV */
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
        ret
END(mcount)

ENTRY(ftrace_caller)
        pushl %eax
        pushl %ecx
        pushl %edx
        pushl $0 /* Pass NULL as regs pointer */
        movl 4*4(%esp), %eax
        movl 0x4(%ebp), %edx
        movl function_trace_op, %ecx
        subl $MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
        call ftrace_stub

        addl $4, %esp /* skip NULL pointer */
        popl %edx
        popl %ecx
        popl %eax
ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
        jmp ftrace_stub
#endif

/* This is weak to keep gas from relaxing the jumps */
WEAK(ftrace_stub)
        ret
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
        pushf /* push flags before compare (in cs location) */

        /*
         * i386 does not save SS and ESP when coming from the kernel.
         * Instead, to get sp, &regs->sp is used (see ptrace.h).
         * Unfortunately, that means eflags must be at the same location
         * as the current return ip. We move the return ip into the
         * ip location, and move flags into the return ip location.
         */
        pushl 4(%esp) /* save return ip into ip slot */

        pushl $0 /* Load 0 into orig_ax */
        pushl %gs
        pushl %fs
        pushl %es
        pushl %ds
        pushl %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        pushl %ecx
        pushl %ebx

        movl 13*4(%esp), %eax /* Get the saved flags */
        movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */
                              /* clobbering return ip */
        movl $__KERNEL_CS, 13*4(%esp)

        movl 12*4(%esp), %eax /* Load ip (1st parameter) */
        subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
        movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
        movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
        pushl %esp /* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
        call ftrace_stub

        addl $4, %esp /* Skip pt_regs */
        movl 14*4(%esp), %eax /* Move flags back into cs */
        movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */
        movl 12*4(%esp), %eax /* Get return ip from regs->ip */
        movl %eax, 14*4(%esp) /* Put return ip back for ret */

        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        popl %ds
        popl %es
        popl %fs
        popl %gs
        addl $8, %esp /* Skip orig_ax and ip */
        popf /* Pop flags at end (no addl to corrupt flags) */
        jmp ftrace_ret

        popf
        jmp ftrace_stub
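/*
 * Frame bookkeeping above, in 4-byte slots up from %esp after the pushes:
 * slots 0-6 hold bx..ax, 7-10 the segment registers, 11 orig_ax, 12
 * regs->ip, 13 regs->cs and 14 regs->flags -- which is why the saved
 * flags are shuffled between 13*4 and 14*4 around the call, and why the
 * final addl $8 skips orig_ax and ip before the popf and the ret through
 * the (possibly updated) return ip left in slot 14.
 */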
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
        cmpl $__PAGE_OFFSET, %esp
        jb ftrace_stub /* Paging not enabled yet? */

        cmpl $ftrace_stub, ftrace_trace_function
        jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        cmpl $ftrace_stub, ftrace_graph_return
        jnz ftrace_graph_caller

        cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
        jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
        ret

        /* taken from glibc */
trace:
        pushl %eax
        pushl %ecx
        pushl %edx
        movl 0xc(%esp), %eax
        movl 0x4(%ebp), %edx
        subl $MCOUNT_INSN_SIZE, %eax

        movl ftrace_trace_function, %ecx
        CALL_NOSPEC %ecx

        popl %edx
        popl %ecx
        popl %eax
        jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
        pushl %eax
        pushl %ecx
        pushl %edx
        movl 0xc(%esp), %eax
        lea 0x4(%ebp), %edx
        movl (%ebp), %ecx
        subl $MCOUNT_INSN_SIZE, %eax
        call prepare_ftrace_return
        popl %edx
        popl %ecx
        popl %eax
        ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
        pushl %eax
        pushl %edx
        movl %ebp, %eax
        call ftrace_return_to_handler
        movl %eax, %ecx
        popl %edx
        popl %eax
        JMP_NOSPEC %ecx
#endif

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
        ASM_CLAC
        pushl $trace_do_page_fault
        jmp error_code
END(trace_page_fault)
#endif

ENTRY(page_fault)
        ASM_CLAC
        pushl $do_page_fault
        ALIGN
error_code:
        /* the function address is in %gs's slot on the stack */
        pushl %fs
        pushl %es
        pushl %ds
        pushl %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        pushl %ecx
        pushl %ebx
        cld
        movl $(__KERNEL_PERCPU), %ecx
        movl %ecx, %fs
        UNWIND_ESPFIX_STACK
        GS_TO_REG %ecx
        movl PT_GS(%esp), %edi # get the function address
        movl PT_ORIG_EAX(%esp), %edx # get the error code
        movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
        REG_TO_PTGS %ecx
        SET_KERNEL_GS %ecx
        movl $(__USER_DS), %ecx
        movl %ecx, %ds
        movl %ecx, %es
        TRACE_IRQS_OFF
        movl %esp, %eax # pt_regs pointer
        CALL_NOSPEC %edi
        jmp ret_from_exception
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
        cmpw $__KERNEL_CS, 4(%esp)
        jne \ok
\label:
        movl TSS_sysenter_sp0 + \offset(%esp), %esp
        pushfl
        pushl $__KERNEL_CS
        pushl $sysenter_past_esp
.endm
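/*
 * Note: the offset passed to FIX_STACK is the number of bytes already
 * pushed onto the SYSENTER stack when the macro runs -- 12 for a single
 * eflags/cs/eip frame (debug or NMI hitting the sysenter instruction),
 * 24 on the NMI-during-debug path below, where both the debug frame and
 * the NMI's own frame are on that stack.
 */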
ENTRY(debug)
        ASM_CLAC
        cmpl $entry_SYSENTER_32, (%esp)
        jne debug_stack_correct
        FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
        pushl $-1 # mark this as an int
        SAVE_ALL
        TRACE_IRQS_OFF
        xorl %edx, %edx # error code 0
        movl %esp, %eax # pt_regs pointer
        call do_debug
        jmp ret_from_exception
END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
        ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
        pushl %eax
        movl %ss, %eax
        cmpw $__ESPFIX_SS, %ax
        popl %eax
        je nmi_espfix_stack
#endif
        cmpl $entry_SYSENTER_32, (%esp)
        je nmi_stack_fixup
        pushl %eax
        movl %esp, %eax
        /*
         * Do not access memory above the end of our stack page,
         * it might not exist.
         */
        andl $(THREAD_SIZE-1), %eax
        cmpl $(THREAD_SIZE-20), %eax
        popl %eax
        jae nmi_stack_correct
        cmpl $entry_SYSENTER_32, 12(%esp)
        je nmi_debug_stack_check
nmi_stack_correct:
        pushl %eax
        SAVE_ALL
        xorl %edx, %edx # zero error code
        movl %esp, %eax # pt_regs pointer
        call do_nmi
        jmp restore_all_notrace

nmi_stack_fixup:
        FIX_STACK 12, nmi_stack_correct, 1
        jmp nmi_stack_correct

nmi_debug_stack_check:
        cmpw $__KERNEL_CS, 16(%esp)
        jne nmi_stack_correct
        cmpl $debug, (%esp)
        jb nmi_stack_correct
        cmpl $debug_esp_fix_insn, (%esp)
        ja nmi_stack_correct
        FIX_STACK 24, nmi_stack_correct, 1
        jmp nmi_stack_correct

#ifdef CONFIG_X86_ESPFIX32
nmi_espfix_stack:
        /*
         * Create the SS:ESP far pointer that the lss below uses to
         * switch back to the espfix stack.
         */
        pushl %ss
        pushl %esp
        addl $4, (%esp)
        /* copy the iret frame of 12 bytes */
        .rept 3
        pushl 16(%esp)
        .endr
        pushl %eax
        SAVE_ALL
        FIXUP_ESPFIX_STACK # %eax == %esp
        xorl %edx, %edx # zero error code
        call do_nmi
        RESTORE_REGS
        lss 12+4(%esp), %esp # back to espfix stack
        jmp irq_return
#endif
END(nmi)

ENTRY(int3)
        ASM_CLAC
        pushl $-1 # mark this as an int
        SAVE_ALL
        TRACE_IRQS_OFF
        xorl %edx, %edx # zero error code
        movl %esp, %eax # pt_regs pointer
        call do_int3
        jmp ret_from_exception
END(int3)

ENTRY(general_protection)
        pushl $do_general_protection
        jmp error_code
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
        ASM_CLAC
        pushl $do_async_page_fault
        jmp error_code
END(async_page_fault)
#endif