Linux/arch/x86/kernel/entry_32.S

  1 /*
  2  *
  3  *  Copyright (C) 1991, 1992  Linus Torvalds
  4  */
  5 
  6 /*
  7  * entry.S contains the system-call and fault low-level handling routines.
  8  * This also contains the timer-interrupt handler, as well as all interrupts
  9  * and faults that can result in a task-switch.
 10  *
 11  * NOTE: This code handles signal-recognition, which happens every time
 12  * after a timer-interrupt and after each system call.
 13  *
 14  * I changed all the .align's to 4 (16 byte alignment), as that's faster
 15  * on a 486.
 16  *
 17  * Stack layout in 'syscall_exit':
 18  *      ptrace needs to have all regs on the stack.
 19  *      if the order here is changed, it needs to be
 20  *      updated in fork.c:copy_process, signal.c:do_signal,
 21  *      ptrace.c and ptrace.h
 22  *
 23  *       0(%esp) - %ebx
 24  *       4(%esp) - %ecx
 25  *       8(%esp) - %edx
 26  *       C(%esp) - %esi
 27  *      10(%esp) - %edi
 28  *      14(%esp) - %ebp
 29  *      18(%esp) - %eax
 30  *      1C(%esp) - %ds
 31  *      20(%esp) - %es
 32  *      24(%esp) - %fs
 33  *      28(%esp) - %gs          saved iff !CONFIG_X86_32_LAZY_GS
 34  *      2C(%esp) - orig_eax
 35  *      30(%esp) - %eip
 36  *      34(%esp) - %cs
 37  *      38(%esp) - %eflags
 38  *      3C(%esp) - %oldesp
 39  *      40(%esp) - %oldss
 40  *
 41  * "current" is in register %ebx during any slow entries.
 42  */
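/*
 * Illustrative only (not part of this file): the layout above is what C
 * code sees as struct pt_regs.  A minimal sketch, assuming the 32-bit
 * definition in <asm/ptrace.h> of this era -- check that header for the
 * authoritative version:
 *
 *      struct pt_regs {
 *              unsigned long bx, cx, dx, si, di, bp, ax;
 *              unsigned long ds, es, fs, gs;
 *              unsigned long orig_ax, ip, cs, flags, sp, ss;
 *      };
 */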
 43 
 44 #include <linux/linkage.h>
 45 #include <asm/thread_info.h>
 46 #include <asm/irqflags.h>
 47 #include <asm/errno.h>
 48 #include <asm/segment.h>
 49 #include <asm/smp.h>
 50 #include <asm/page_types.h>
 51 #include <asm/percpu.h>
 52 #include <asm/dwarf2.h>
 53 #include <asm/processor-flags.h>
 54 #include <asm/ftrace.h>
 55 #include <asm/irq_vectors.h>
 56 #include <asm/cpufeature.h>
 57 
 58 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 59 #include <linux/elf-em.h>
 60 #define AUDIT_ARCH_I386         (EM_386|__AUDIT_ARCH_LE)
 61 #define __AUDIT_ARCH_LE    0x40000000
 62 
 63 #ifndef CONFIG_AUDITSYSCALL
 64 #define sysenter_audit  syscall_trace_entry
 65 #define sysexit_audit   syscall_exit_work
 66 #endif
 67 
 68 /*
 69  * We use macros for low-level operations which need to be overridden
 70  * for paravirtualization.  The following will never clobber any registers:
 71  *   INTERRUPT_RETURN (aka. "iret")
 72  *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 73  *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 74  *
 75  * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 76  * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 77  * Allowing a register to be clobbered can shrink the paravirt replacement
 78  * enough to patch inline, increasing performance.
 79  */
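/*
 * Rough illustration: with CONFIG_PARAVIRT disabled, these macros
 * collapse to the native instructions named above, i.e.
 *
 *      DISABLE_INTERRUPTS(CLBR_NONE)   ->  cli
 *      ENABLE_INTERRUPTS(CLBR_NONE)    ->  sti
 *      ENABLE_INTERRUPTS_SYSEXIT       ->  sti; sysexit
 *      INTERRUPT_RETURN                ->  iret
 *
 * With CONFIG_PARAVIRT enabled, each site instead becomes a patchable
 * call into the hypervisor's replacement code.
 */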
 80 
 81 #define nr_syscalls ((syscall_table_size)/4)
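/* syscall_table_size is in bytes; sys_call_table entries are 4-byte
   pointers, hence the division by 4. */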
 82 
 83 #ifdef CONFIG_PREEMPT
 84 #define preempt_stop(clobbers)  DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
 85 #else
 86 #define preempt_stop(clobbers)
 87 #define resume_kernel           restore_all
 88 #endif
 89 
 90 .macro TRACE_IRQS_IRET
 91 #ifdef CONFIG_TRACE_IRQFLAGS
 92         testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)     # interrupts off?
 93         jz 1f
 94         TRACE_IRQS_ON
 95 1:
 96 #endif
 97 .endm
 98 
 99 #ifdef CONFIG_VM86
100 #define resume_userspace_sig    check_userspace
101 #else
102 #define resume_userspace_sig    resume_userspace
103 #endif
104 
105 /*
106  * User gs save/restore
107  *
108  * %gs is used for userland TLS and kernel only uses it for stack
109  * canary which is required to be at %gs:20 by gcc.  Read the comment
110  * at the top of stackprotector.h for more info.
111  *
112  * Local labels 98 and 99 are used.
113  */
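/*
 * For reference (a sketch of compiler output, not code assembled here):
 * with stack protector enabled, gcc guards a function roughly as
 *
 *      movl %gs:20, %eax       # copy canary into the frame on entry
 *      ...
 *      xorl %gs:20, %eax       # recheck on exit; on mismatch,
 *      jne  <call __stack_chk_fail>
 *
 * which is why %gs (and the value at %gs:20) must stay valid whenever
 * such code can run.
 */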
114 #ifdef CONFIG_X86_32_LAZY_GS
115 
 116  /* unfortunately push/pop can't be no-ops */
117 .macro PUSH_GS
118         pushl_cfi $0
119 .endm
120 .macro POP_GS pop=0
121         addl $(4 + \pop), %esp
122         CFI_ADJUST_CFA_OFFSET -(4 + \pop)
123 .endm
124 .macro POP_GS_EX
125 .endm
126 
 127  /* all the rest are no-ops */
128 .macro PTGS_TO_GS
129 .endm
130 .macro PTGS_TO_GS_EX
131 .endm
132 .macro GS_TO_REG reg
133 .endm
134 .macro REG_TO_PTGS reg
135 .endm
136 .macro SET_KERNEL_GS reg
137 .endm
138 
139 #else   /* CONFIG_X86_32_LAZY_GS */
140 
141 .macro PUSH_GS
142         pushl_cfi %gs
143         /*CFI_REL_OFFSET gs, 0*/
144 .endm
145 
146 .macro POP_GS pop=0
147 98:     popl_cfi %gs
148         /*CFI_RESTORE gs*/
149   .if \pop <> 0
150         add $\pop, %esp
151         CFI_ADJUST_CFA_OFFSET -\pop
152   .endif
153 .endm
154 .macro POP_GS_EX
155 .pushsection .fixup, "ax"
156 99:     movl $0, (%esp)
157         jmp 98b
158 .section __ex_table, "a"
159         .align 4
160         .long 98b, 99b
161 .popsection
162 .endm
163 
164 .macro PTGS_TO_GS
165 98:     mov PT_GS(%esp), %gs
166 .endm
167 .macro PTGS_TO_GS_EX
168 .pushsection .fixup, "ax"
169 99:     movl $0, PT_GS(%esp)
170         jmp 98b
171 .section __ex_table, "a"
172         .align 4
173         .long 98b, 99b
174 .popsection
175 .endm
176 
177 .macro GS_TO_REG reg
178         movl %gs, \reg
179         /*CFI_REGISTER gs, \reg*/
180 .endm
181 .macro REG_TO_PTGS reg
182         movl \reg, PT_GS(%esp)
183         /*CFI_REL_OFFSET gs, PT_GS*/
184 .endm
185 .macro SET_KERNEL_GS reg
186         movl $(__KERNEL_STACK_CANARY), \reg
187         movl \reg, %gs
188 .endm
189 
190 #endif  /* CONFIG_X86_32_LAZY_GS */
191 
192 .macro SAVE_ALL
193         cld
194         PUSH_GS
195         pushl_cfi %fs
196         /*CFI_REL_OFFSET fs, 0;*/
197         pushl_cfi %es
198         /*CFI_REL_OFFSET es, 0;*/
199         pushl_cfi %ds
200         /*CFI_REL_OFFSET ds, 0;*/
201         pushl_cfi %eax
202         CFI_REL_OFFSET eax, 0
203         pushl_cfi %ebp
204         CFI_REL_OFFSET ebp, 0
205         pushl_cfi %edi
206         CFI_REL_OFFSET edi, 0
207         pushl_cfi %esi
208         CFI_REL_OFFSET esi, 0
209         pushl_cfi %edx
210         CFI_REL_OFFSET edx, 0
211         pushl_cfi %ecx
212         CFI_REL_OFFSET ecx, 0
213         pushl_cfi %ebx
214         CFI_REL_OFFSET ebx, 0
215         movl $(__USER_DS), %edx
216         movl %edx, %ds
217         movl %edx, %es
218         movl $(__KERNEL_PERCPU), %edx
219         movl %edx, %fs
220         SET_KERNEL_GS %edx
221 .endm
222 
223 .macro RESTORE_INT_REGS
224         popl_cfi %ebx
225         CFI_RESTORE ebx
226         popl_cfi %ecx
227         CFI_RESTORE ecx
228         popl_cfi %edx
229         CFI_RESTORE edx
230         popl_cfi %esi
231         CFI_RESTORE esi
232         popl_cfi %edi
233         CFI_RESTORE edi
234         popl_cfi %ebp
235         CFI_RESTORE ebp
236         popl_cfi %eax
237         CFI_RESTORE eax
238 .endm
239 
240 .macro RESTORE_REGS pop=0
241         RESTORE_INT_REGS
242 1:      popl_cfi %ds
243         /*CFI_RESTORE ds;*/
244 2:      popl_cfi %es
245         /*CFI_RESTORE es;*/
246 3:      popl_cfi %fs
247         /*CFI_RESTORE fs;*/
248         POP_GS \pop
249 .pushsection .fixup, "ax"
250 4:      movl $0, (%esp)
251         jmp 1b
252 5:      movl $0, (%esp)
253         jmp 2b
254 6:      movl $0, (%esp)
255         jmp 3b
256 .section __ex_table, "a"
257         .align 4
258         .long 1b, 4b
259         .long 2b, 5b
260         .long 3b, 6b
261 .popsection
262         POP_GS_EX
263 .endm
264 
265 .macro RING0_INT_FRAME
266         CFI_STARTPROC simple
267         CFI_SIGNAL_FRAME
268         CFI_DEF_CFA esp, 3*4
269         /*CFI_OFFSET cs, -2*4;*/
270         CFI_OFFSET eip, -3*4
271 .endm
272 
273 .macro RING0_EC_FRAME
274         CFI_STARTPROC simple
275         CFI_SIGNAL_FRAME
276         CFI_DEF_CFA esp, 4*4
277         /*CFI_OFFSET cs, -2*4;*/
278         CFI_OFFSET eip, -3*4
279 .endm
280 
281 .macro RING0_PTREGS_FRAME
282         CFI_STARTPROC simple
283         CFI_SIGNAL_FRAME
284         CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
285         /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
286         CFI_OFFSET eip, PT_EIP-PT_OLDESP
287         /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
288         /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
289         CFI_OFFSET eax, PT_EAX-PT_OLDESP
290         CFI_OFFSET ebp, PT_EBP-PT_OLDESP
291         CFI_OFFSET edi, PT_EDI-PT_OLDESP
292         CFI_OFFSET esi, PT_ESI-PT_OLDESP
293         CFI_OFFSET edx, PT_EDX-PT_OLDESP
294         CFI_OFFSET ecx, PT_ECX-PT_OLDESP
295         CFI_OFFSET ebx, PT_EBX-PT_OLDESP
296 .endm
297 
298 ENTRY(ret_from_fork)
299         CFI_STARTPROC
300         pushl_cfi %eax
301         call schedule_tail
302         GET_THREAD_INFO(%ebp)
303         popl_cfi %eax
304         pushl_cfi $0x0202               # Reset kernel eflags
305         popfl_cfi
306         jmp syscall_exit
307         CFI_ENDPROC
308 END(ret_from_fork)
309 
310 /*
311  * Interrupt exit functions should be protected against kprobes
312  */
313         .pushsection .kprobes.text, "ax"
314 /*
315  * Return to user mode is not as complex as all this looks,
316  * but we want the default path for a system call return to
 317  * go as quickly as possible, which is why some of this is
318  * less clear than it otherwise should be.
319  */
320 
321         # userspace resumption stub bypassing syscall exit tracing
322         ALIGN
323         RING0_PTREGS_FRAME
324 ret_from_exception:
325         preempt_stop(CLBR_ANY)
326 ret_from_intr:
327         GET_THREAD_INFO(%ebp)
328 check_userspace:
329         movl PT_EFLAGS(%esp), %eax      # mix EFLAGS and CS
330         movb PT_CS(%esp), %al
331         andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
332         cmpl $USER_RPL, %eax
333         jb resume_kernel                # not returning to v8086 or userspace
334 
335 ENTRY(resume_userspace)
336         LOCKDEP_SYS_EXIT
337         DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
338                                         # setting need_resched or sigpending
339                                         # between sampling and the iret
340         TRACE_IRQS_OFF
341         movl TI_flags(%ebp), %ecx
342         andl $_TIF_WORK_MASK, %ecx      # is there any work to be done on
343                                         # int/exception return?
344         jne work_pending
345         jmp restore_all
346 END(ret_from_exception)
347 
348 #ifdef CONFIG_PREEMPT
349 ENTRY(resume_kernel)
350         DISABLE_INTERRUPTS(CLBR_ANY)
351         cmpl $0,TI_preempt_count(%ebp)  # non-zero preempt_count ?
352         jnz restore_all
353 need_resched:
354         movl TI_flags(%ebp), %ecx       # need_resched set ?
355         testb $_TIF_NEED_RESCHED, %cl
356         jz restore_all
357         testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)    # interrupts off (exception path) ?
358         jz restore_all
359         call preempt_schedule_irq
360         jmp need_resched
361 END(resume_kernel)
362 #endif
363         CFI_ENDPROC
364 /*
365  * End of kprobes section
366  */
367         .popsection
368 
369 /* SYSENTER_RETURN points to after the "sysenter" instruction in
 370    the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */
371 
372         # sysenter call handler stub
373 ENTRY(ia32_sysenter_target)
374         CFI_STARTPROC simple
375         CFI_SIGNAL_FRAME
376         CFI_DEF_CFA esp, 0
377         CFI_REGISTER esp, ebp
378         movl TSS_sysenter_sp0(%esp),%esp
379 sysenter_past_esp:
380         /*
 381          * Interrupts are disabled here, but we can't trace that fact until
 382          * enough kernel state has been set up to call TRACE_IRQS_OFF - and
 383          * we immediately enable interrupts at that point anyway.
384          */
385         pushl_cfi $__USER_DS
386         /*CFI_REL_OFFSET ss, 0*/
387         pushl_cfi %ebp
388         CFI_REL_OFFSET esp, 0
389         pushfl_cfi
390         orl $X86_EFLAGS_IF, (%esp)
391         pushl_cfi $__USER_CS
392         /*CFI_REL_OFFSET cs, 0*/
393         /*
394          * Push current_thread_info()->sysenter_return to the stack.
395          * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
396          * pushed above; +8 corresponds to copy_thread's esp0 setting.
397          */
398         pushl_cfi ((TI_sysenter_return)-THREAD_SIZE_asm+8+4*4)(%esp)
399         CFI_REL_OFFSET eip, 0
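/*
 * The arithmetic above, spelled out: the top of this thread's kernel
 * stack is THREAD_SIZE above its thread_info, and copy_thread sets esp0
 * 8 bytes below that top; the 4*4 accounts for the four words pushed
 * since.  So %esp + 4*4 + 8 is the stack top, subtracting
 * THREAD_SIZE_asm yields the thread_info base, and TI_sysenter_return
 * indexes the field within it.
 */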
400 
401         pushl_cfi %eax
402         SAVE_ALL
403         ENABLE_INTERRUPTS(CLBR_NONE)
404 
405 /*
406  * Load the potential sixth argument from user stack.
407  * Careful about security.
408  */
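/*
 * Why __PAGE_OFFSET-3: the 4-byte load below must lie entirely in user
 * space, so the largest valid %ebp is __PAGE_OFFSET-4; "jae" on the
 * comparison against __PAGE_OFFSET-3 rejects everything above that.
 */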
409         cmpl $__PAGE_OFFSET-3,%ebp
410         jae syscall_fault
411 1:      movl (%ebp),%ebp
412         movl %ebp,PT_EBP(%esp)
413 .section __ex_table,"a"
414         .align 4
415         .long 1b,syscall_fault
416 .previous
417 
418         GET_THREAD_INFO(%ebp)
419 
420         testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
421         jnz sysenter_audit
422 sysenter_do_call:
423         cmpl $(nr_syscalls), %eax
424         jae syscall_badsys
425         call *sys_call_table(,%eax,4)
426         movl %eax,PT_EAX(%esp)
427         LOCKDEP_SYS_EXIT
428         DISABLE_INTERRUPTS(CLBR_ANY)
429         TRACE_IRQS_OFF
430         movl TI_flags(%ebp), %ecx
431         testl $_TIF_ALLWORK_MASK, %ecx
432         jne sysexit_audit
433 sysenter_exit:
434 /* if something modifies registers it must also disable sysexit */
435         movl PT_EIP(%esp), %edx
436         movl PT_OLDESP(%esp), %ecx
437         xorl %ebp,%ebp
438         TRACE_IRQS_ON
439 1:      mov  PT_FS(%esp), %fs
440         PTGS_TO_GS
441         ENABLE_INTERRUPTS_SYSEXIT
442 
443 #ifdef CONFIG_AUDITSYSCALL
444 sysenter_audit:
445         testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
446         jnz syscall_trace_entry
447         addl $4,%esp
448         CFI_ADJUST_CFA_OFFSET -4
449         /* %esi already in 8(%esp)         6th arg: 4th syscall arg */
450         /* %edx already in 4(%esp)         5th arg: 3rd syscall arg */
451         /* %ecx already in 0(%esp)         4th arg: 2nd syscall arg */
452         movl %ebx,%ecx                  /* 3rd arg: 1st syscall arg */
453         movl %eax,%edx                  /* 2nd arg: syscall number */
454         movl $AUDIT_ARCH_I386,%eax      /* 1st arg: audit arch */
455         call audit_syscall_entry
456         pushl_cfi %ebx
457         movl PT_EAX(%esp),%eax          /* reload syscall number */
458         jmp sysenter_do_call
459 
460 sysexit_audit:
461         testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
462         jne syscall_exit_work
463         TRACE_IRQS_ON
464         ENABLE_INTERRUPTS(CLBR_ANY)
465         movl %eax,%edx          /* second arg, syscall return value */
466         cmpl $0,%eax            /* is it < 0? */
467         setl %al                /* 1 if so, 0 if not */
468         movzbl %al,%eax         /* zero-extend that */
469         inc %eax /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
470         call audit_syscall_exit
471         DISABLE_INTERRUPTS(CLBR_ANY)
472         TRACE_IRQS_OFF
473         movl TI_flags(%ebp), %ecx
474         testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
475         jne syscall_exit_work
476         movl PT_EAX(%esp),%eax  /* reload syscall return value */
477         jmp sysenter_exit
478 #endif
479 
480         CFI_ENDPROC
481 .pushsection .fixup,"ax"
482 2:      movl $0,PT_FS(%esp)
483         jmp 1b
484 .section __ex_table,"a"
485         .align 4
486         .long 1b,2b
487 .popsection
488         PTGS_TO_GS_EX
489 ENDPROC(ia32_sysenter_target)
490 
491 /*
492  * syscall stub including irq exit should be protected against kprobes
493  */
494         .pushsection .kprobes.text, "ax"
495         # system call handler stub
496 ENTRY(system_call)
497         RING0_INT_FRAME                 # can't unwind into user space anyway
498         pushl_cfi %eax                  # save orig_eax
499         SAVE_ALL
500         GET_THREAD_INFO(%ebp)
501                                         # system call tracing in operation / emulation
502         testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
503         jnz syscall_trace_entry
504         cmpl $(nr_syscalls), %eax
505         jae syscall_badsys
506 syscall_call:
507         call *sys_call_table(,%eax,4)
508         movl %eax,PT_EAX(%esp)          # store the return value
509 syscall_exit:
510         LOCKDEP_SYS_EXIT
511         DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
512                                         # setting need_resched or sigpending
513                                         # between sampling and the iret
514         TRACE_IRQS_OFF
515         movl TI_flags(%ebp), %ecx
516         testl $_TIF_ALLWORK_MASK, %ecx  # current->work
517         jne syscall_exit_work
518 
519 restore_all:
520         TRACE_IRQS_IRET
521 restore_all_notrace:
522         movl PT_EFLAGS(%esp), %eax      # mix EFLAGS, SS and CS
523         # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
524         # are returning to the kernel.
525         # See comments in process.c:copy_thread() for details.
526         movb PT_OLDSS(%esp), %ah
527         movb PT_CS(%esp), %al
528         andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
529         cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
530         CFI_REMEMBER_STATE
531         je ldt_ss                       # returning to user-space with LDT SS
532 restore_nocheck:
533         RESTORE_REGS 4                  # skip orig_eax/error_code
534 irq_return:
535         INTERRUPT_RETURN
536 .section .fixup,"ax"
537 ENTRY(iret_exc)
538         pushl $0                        # no error code
539         pushl $do_iret_error
540         jmp error_code
541 .previous
542 .section __ex_table,"a"
543         .align 4
544         .long irq_return,iret_exc
545 .previous
546 
547         CFI_RESTORE_STATE
548 ldt_ss:
549         larl PT_OLDSS(%esp), %eax
550         jnz restore_nocheck
551         testl $0x00400000, %eax         # returning to 32bit stack?
 552         jnz restore_nocheck             # all right, normal return
553 
554 #ifdef CONFIG_PARAVIRT
555         /*
556          * The kernel can't run on a non-flat stack if paravirt mode
557          * is active.  Rather than try to fixup the high bits of
558          * ESP, bypass this code entirely.  This may break DOSemu
559          * and/or Wine support in a paravirt VM, although the option
560          * is still available to implement the setting of the high
561          * 16-bits in the INTERRUPT_RETURN paravirt-op.
562          */
563         cmpl $0, pv_info+PARAVIRT_enabled
564         jne restore_nocheck
565 #endif
566 
567 /*
568  * Setup and switch to ESPFIX stack
569  *
570  * We're returning to userspace with a 16 bit stack. The CPU will not
571  * restore the high word of ESP for us on executing iret... This is an
572  * "official" bug of all the x86-compatible CPUs, which we can work
573  * around to make dosemu and wine happy. We do this by preloading the
574  * high word of ESP with the high word of the userspace ESP while
575  * compensating for the offset by changing to the ESPFIX segment with
576  * a base address that matches for the difference.
577  */
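/*
 * Worked example with made-up values: for a kernel esp of 0xc1234000
 * and a userspace esp of 0x5678abcd, %eax below becomes 0x56784000
 * (user high word, kernel low word) and %edx becomes 0x6aab, which is
 * written into the __ESPFIX_SS base as 0x6aab0000.  Then
 * 0x6aab0000 + 0x56784000 = 0xc1234000, so the new ss:esp still
 * addresses the kernel stack, while iret only has to restore the low
 * 16 bits the userspace stack cares about.
 */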
578 #define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
579         mov %esp, %edx                  /* load kernel esp */
580         mov PT_OLDESP(%esp), %eax       /* load userspace esp */
581         mov %dx, %ax                    /* eax: new kernel esp */
582         sub %eax, %edx                  /* offset (low word is 0) */
583         shr $16, %edx
584         mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
585         mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
586         pushl_cfi $__ESPFIX_SS
587         pushl_cfi %eax                  /* new kernel esp */
588         /* Disable interrupts, but do not irqtrace this section: we
589          * will soon execute iret and the tracer was already set to
590          * the irqstate after the iret */
591         DISABLE_INTERRUPTS(CLBR_EAX)
592         lss (%esp), %esp                /* switch to espfix segment */
593         CFI_ADJUST_CFA_OFFSET -8
594         jmp restore_nocheck
595         CFI_ENDPROC
596 ENDPROC(system_call)
597 
598         # perform work that needs to be done immediately before resumption
599         ALIGN
600         RING0_PTREGS_FRAME              # can't unwind into user space anyway
601 work_pending:
602         testb $_TIF_NEED_RESCHED, %cl
603         jz work_notifysig
604 work_resched:
605         call schedule
606         LOCKDEP_SYS_EXIT
607         DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
608                                         # setting need_resched or sigpending
609                                         # between sampling and the iret
610         TRACE_IRQS_OFF
611         movl TI_flags(%ebp), %ecx
612         andl $_TIF_WORK_MASK, %ecx      # is there any work to be done other
613                                         # than syscall tracing?
614         jz restore_all
615         testb $_TIF_NEED_RESCHED, %cl
616         jnz work_resched
617 
618 work_notifysig:                         # deal with pending signals and
619                                         # notify-resume requests
620 #ifdef CONFIG_VM86
621         testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
622         movl %esp, %eax
623         jne work_notifysig_v86          # returning to kernel-space or
624                                         # vm86-space
625         xorl %edx, %edx
626         call do_notify_resume
627         jmp resume_userspace_sig
628 
629         ALIGN
630 work_notifysig_v86:
631         pushl_cfi %ecx                  # save ti_flags for do_notify_resume
632         call save_v86_state             # %eax contains pt_regs pointer
633         popl_cfi %ecx
634         movl %eax, %esp
635 #else
636         movl %esp, %eax
637 #endif
638         xorl %edx, %edx
639         call do_notify_resume
640         jmp resume_userspace_sig
641 END(work_pending)
642 
 643         # perform syscall entry tracing
644         ALIGN
645 syscall_trace_entry:
646         movl $-ENOSYS,PT_EAX(%esp)
647         movl %esp, %eax
648         call syscall_trace_enter
649         /* What it returned is what we'll actually use.  */
650         cmpl $(nr_syscalls), %eax
651         jnae syscall_call
652         jmp syscall_exit
653 END(syscall_trace_entry)
654 
655         # perform syscall exit tracing
656         ALIGN
657 syscall_exit_work:
658         testl $_TIF_WORK_SYSCALL_EXIT, %ecx
659         jz work_pending
660         TRACE_IRQS_ON
661         ENABLE_INTERRUPTS(CLBR_ANY)     # could let syscall_trace_leave() call
662                                         # schedule() instead
663         movl %esp, %eax
664         call syscall_trace_leave
665         jmp resume_userspace
666 END(syscall_exit_work)
667         CFI_ENDPROC
668 
669         RING0_INT_FRAME                 # can't unwind into user space anyway
670 syscall_fault:
671         GET_THREAD_INFO(%ebp)
672         movl $-EFAULT,PT_EAX(%esp)
673         jmp resume_userspace
674 END(syscall_fault)
675 
676 syscall_badsys:
677         movl $-ENOSYS,PT_EAX(%esp)
678         jmp resume_userspace
679 END(syscall_badsys)
680         CFI_ENDPROC
681 /*
682  * End of kprobes section
683  */
684         .popsection
685 
686 /*
687  * System calls that need a pt_regs pointer.
688  */
689 #define PTREGSCALL0(name) \
690         ALIGN; \
691 ptregs_##name: \
692         leal 4(%esp),%eax; \
693         jmp sys_##name;
694 
695 #define PTREGSCALL1(name) \
696         ALIGN; \
697 ptregs_##name: \
698         leal 4(%esp),%edx; \
699         movl (PT_EBX+4)(%esp),%eax; \
700         jmp sys_##name;
701 
702 #define PTREGSCALL2(name) \
703         ALIGN; \
704 ptregs_##name: \
705         leal 4(%esp),%ecx; \
706         movl (PT_ECX+4)(%esp),%edx; \
707         movl (PT_EBX+4)(%esp),%eax; \
708         jmp sys_##name;
709 
710 #define PTREGSCALL3(name) \
711         ALIGN; \
712 ptregs_##name: \
713         CFI_STARTPROC; \
714         leal 4(%esp),%eax; \
715         pushl_cfi %eax; \
716         movl PT_EDX(%eax),%ecx; \
717         movl PT_ECX(%eax),%edx; \
718         movl PT_EBX(%eax),%eax; \
719         call sys_##name; \
720         addl $4,%esp; \
721         CFI_ADJUST_CFA_OFFSET -4; \
722         ret; \
723         CFI_ENDPROC; \
724 ENDPROC(ptregs_##name)
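/*
 * For example, PTREGSCALL0(fork) below expands (ignoring alignment) to:
 *
 * ptregs_fork:
 *      leal 4(%esp), %eax      # &pt_regs, just past the return address
 *      jmp sys_fork
 */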
725 
726 PTREGSCALL1(iopl)
727 PTREGSCALL0(fork)
728 PTREGSCALL0(vfork)
729 PTREGSCALL3(execve)
730 PTREGSCALL2(sigaltstack)
731 PTREGSCALL0(sigreturn)
732 PTREGSCALL0(rt_sigreturn)
733 PTREGSCALL2(vm86)
734 PTREGSCALL1(vm86old)
735 
736 /* Clone is an oddball.  The 4th arg is in %edi */
737         ALIGN;
738 ptregs_clone:
739         CFI_STARTPROC
740         leal 4(%esp),%eax
741         pushl_cfi %eax
742         pushl_cfi PT_EDI(%eax)
743         movl PT_EDX(%eax),%ecx
744         movl PT_ECX(%eax),%edx
745         movl PT_EBX(%eax),%eax
746         call sys_clone
747         addl $8,%esp
748         CFI_ADJUST_CFA_OFFSET -8
749         ret
750         CFI_ENDPROC
751 ENDPROC(ptregs_clone)
752 
753 .macro FIXUP_ESPFIX_STACK
754 /*
 755  * Switch back from the ESPFIX stack to the normal zero-based stack
 756  *
 757  * We can't call C functions using the ESPFIX stack. This code reads
 758  * the high word of the segment base from the GDT, switches to the
 759  * normal stack and adjusts ESP by the matching offset.
760  */
761         /* fixup the stack */
762         mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
763         mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
764         shl $16, %eax
765         addl %esp, %eax                 /* the adjusted stack pointer */
766         pushl_cfi $__KERNEL_DS
767         pushl_cfi %eax
768         lss (%esp), %esp                /* switch to the normal stack segment */
769         CFI_ADJUST_CFA_OFFSET -8
770 .endm
771 .macro UNWIND_ESPFIX_STACK
772         movl %ss, %eax
773         /* see if on espfix stack */
774         cmpw $__ESPFIX_SS, %ax
775         jne 27f
776         movl $__KERNEL_DS, %eax
777         movl %eax, %ds
778         movl %eax, %es
779         /* switch to normal stack */
780         FIXUP_ESPFIX_STACK
781 27:
782 .endm
783 
784 /*
785  * Build the entry stubs and pointer table with some assembler magic.
786  * We pack 7 stubs into a single 32-byte chunk, which will fit in a
787  * single cache line on all modern x86 implementations.
788  */
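/*
 * Each generated stub is 4 bytes: a two-byte "pushl $imm8" of
 * ~vector+0x80 (deliberately kept in signed byte range) plus a two-byte
 * short jump toward common_interrupt.  For vector 0x20, for instance,
 * the stub pushes 0x5f; common_interrupt's "addl $-0x80" then recovers
 * ~0x20 = -0x21, so orig_eax always lands in [-256,-1].
 */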
789 .section .init.rodata,"a"
790 ENTRY(interrupt)
791 .text
792         .p2align 5
793         .p2align CONFIG_X86_L1_CACHE_SHIFT
794 ENTRY(irq_entries_start)
795         RING0_INT_FRAME
796 vector=FIRST_EXTERNAL_VECTOR
797 .rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
798         .balign 32
799   .rept 7
800     .if vector < NR_VECTORS
801       .if vector <> FIRST_EXTERNAL_VECTOR
802         CFI_ADJUST_CFA_OFFSET -4
803       .endif
804 1:      pushl_cfi $(~vector+0x80)       /* Note: always in signed byte range */
805       .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
806         jmp 2f
807       .endif
808       .previous
809         .long 1b
810       .text
811 vector=vector+1
812     .endif
813   .endr
814 2:      jmp common_interrupt
815 .endr
816 END(irq_entries_start)
817 
818 .previous
819 END(interrupt)
820 .previous
821 
822 /*
823  * the CPU automatically disables interrupts when executing an IRQ vector,
824  * so IRQ-flags tracing has to follow that:
825  */
826         .p2align CONFIG_X86_L1_CACHE_SHIFT
827 common_interrupt:
828         addl $-0x80,(%esp)      /* Adjust vector into the [-256,-1] range */
829         SAVE_ALL
830         TRACE_IRQS_OFF
831         movl %esp,%eax
832         call do_IRQ
833         jmp ret_from_intr
834 ENDPROC(common_interrupt)
835         CFI_ENDPROC
836 
837 /*
838  *  Irq entries should be protected against kprobes
839  */
840         .pushsection .kprobes.text, "ax"
841 #define BUILD_INTERRUPT3(name, nr, fn)  \
842 ENTRY(name)                             \
843         RING0_INT_FRAME;                \
844         pushl_cfi $~(nr);               \
845         SAVE_ALL;                       \
846         TRACE_IRQS_OFF                  \
847         movl %esp,%eax;                 \
848         call fn;                        \
849         jmp ret_from_intr;              \
850         CFI_ENDPROC;                    \
851 ENDPROC(name)
852 
853 #define BUILD_INTERRUPT(name, nr)       BUILD_INTERRUPT3(name, nr, smp_##name)
854 
855 /* The include is where all of the SMP etc. interrupts come from */
856 #include <asm/entry_arch.h>
857 
858 ENTRY(coprocessor_error)
859         RING0_INT_FRAME
860         pushl_cfi $0
861         pushl_cfi $do_coprocessor_error
862         jmp error_code
863         CFI_ENDPROC
864 END(coprocessor_error)
865 
866 ENTRY(simd_coprocessor_error)
867         RING0_INT_FRAME
868         pushl_cfi $0
869 #ifdef CONFIG_X86_INVD_BUG
870         /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
871 661:    pushl_cfi $do_general_protection
872 662:
873 .section .altinstructions,"a"
874         .balign 4
875         .long 661b
876         .long 663f
877         .word X86_FEATURE_XMM
878         .byte 662b-661b
879         .byte 664f-663f
880 .previous
881 .section .altinstr_replacement,"ax"
882 663:    pushl $do_simd_coprocessor_error
883 664:
884 .previous
885 #else
886         pushl_cfi $do_simd_coprocessor_error
887 #endif
888         jmp error_code
889         CFI_ENDPROC
890 END(simd_coprocessor_error)
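/*
 * For reference, the .altinstructions record above consists of: the
 * original instruction's address, the replacement's address, the CPU
 * feature bit that triggers patching, and the two instruction lengths.
 * CPUs with X86_FEATURE_XMM get the do_simd_coprocessor_error push
 * patched in; afflicted CPUs keep the do_general_protection default.
 */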
891 
892 ENTRY(device_not_available)
893         RING0_INT_FRAME
894         pushl_cfi $-1                   # mark this as an int
895         pushl_cfi $do_device_not_available
896         jmp error_code
897         CFI_ENDPROC
898 END(device_not_available)
899 
900 #ifdef CONFIG_PARAVIRT
901 ENTRY(native_iret)
902         iret
903 .section __ex_table,"a"
904         .align 4
905         .long native_iret, iret_exc
906 .previous
907 END(native_iret)
908 
909 ENTRY(native_irq_enable_sysexit)
910         sti
911         sysexit
912 END(native_irq_enable_sysexit)
913 #endif
914 
915 ENTRY(overflow)
916         RING0_INT_FRAME
917         pushl_cfi $0
918         pushl_cfi $do_overflow
919         jmp error_code
920         CFI_ENDPROC
921 END(overflow)
922 
923 ENTRY(bounds)
924         RING0_INT_FRAME
925         pushl_cfi $0
926         pushl_cfi $do_bounds
927         jmp error_code
928         CFI_ENDPROC
929 END(bounds)
930 
931 ENTRY(invalid_op)
932         RING0_INT_FRAME
933         pushl_cfi $0
934         pushl_cfi $do_invalid_op
935         jmp error_code
936         CFI_ENDPROC
937 END(invalid_op)
938 
939 ENTRY(coprocessor_segment_overrun)
940         RING0_INT_FRAME
941         pushl_cfi $0
942         pushl_cfi $do_coprocessor_segment_overrun
943         jmp error_code
944         CFI_ENDPROC
945 END(coprocessor_segment_overrun)
946 
947 ENTRY(invalid_TSS)
948         RING0_EC_FRAME
949         pushl_cfi $do_invalid_TSS
950         jmp error_code
951         CFI_ENDPROC
952 END(invalid_TSS)
953 
954 ENTRY(segment_not_present)
955         RING0_EC_FRAME
956         pushl_cfi $do_segment_not_present
957         jmp error_code
958         CFI_ENDPROC
959 END(segment_not_present)
960 
961 ENTRY(stack_segment)
962         RING0_EC_FRAME
963         pushl_cfi $do_stack_segment
964         jmp error_code
965         CFI_ENDPROC
966 END(stack_segment)
967 
968 ENTRY(alignment_check)
969         RING0_EC_FRAME
970         pushl_cfi $do_alignment_check
971         jmp error_code
972         CFI_ENDPROC
973 END(alignment_check)
974 
975 ENTRY(divide_error)
976         RING0_INT_FRAME
977         pushl_cfi $0                    # no error code
978         pushl_cfi $do_divide_error
979         jmp error_code
980         CFI_ENDPROC
981 END(divide_error)
982 
983 #ifdef CONFIG_X86_MCE
984 ENTRY(machine_check)
985         RING0_INT_FRAME
986         pushl_cfi $0
987         pushl_cfi machine_check_vector
988         jmp error_code
989         CFI_ENDPROC
990 END(machine_check)
991 #endif
992 
993 ENTRY(spurious_interrupt_bug)
994         RING0_INT_FRAME
995         pushl_cfi $0
996         pushl_cfi $do_spurious_interrupt_bug
997         jmp error_code
998         CFI_ENDPROC
999 END(spurious_interrupt_bug)
1000 /*
1001  * End of kprobes section
1002  */
1003         .popsection
1004 
1005 ENTRY(kernel_thread_helper)
1006         pushl $0                # fake return address for unwinder
1007         CFI_STARTPROC
1008         movl %edi,%eax
1009         call *%esi
1010         call do_exit
1011         ud2                     # padding for call trace
1012         CFI_ENDPROC
1013 ENDPROC(kernel_thread_helper)
1014 
1015 #ifdef CONFIG_XEN
1016 /* Xen doesn't set %esp to be precisely what the normal sysenter
1017    entrypoint expects, so fix it up before using the normal path. */
1018 ENTRY(xen_sysenter_target)
1019         RING0_INT_FRAME
1020         addl $5*4, %esp         /* remove xen-provided frame */
1021         CFI_ADJUST_CFA_OFFSET -5*4
1022         jmp sysenter_past_esp
1023         CFI_ENDPROC
1024 
1025 ENTRY(xen_hypervisor_callback)
1026         CFI_STARTPROC
1027         pushl_cfi $0
1028         SAVE_ALL
1029         TRACE_IRQS_OFF
1030 
1031         /* Check to see if we got the event in the critical
1032            region in xen_iret_direct, after we've reenabled
1033            events and checked for pending events.  This simulates
1034            iret instruction's behaviour where it delivers a
1035            pending interrupt when enabling interrupts. */
1036         movl PT_EIP(%esp),%eax
1037         cmpl $xen_iret_start_crit,%eax
1038         jb   1f
1039         cmpl $xen_iret_end_crit,%eax
1040         jae  1f
1041 
1042         jmp  xen_iret_crit_fixup
1043 
1044 ENTRY(xen_do_upcall)
1045 1:      mov %esp, %eax
1046         call xen_evtchn_do_upcall
1047         jmp  ret_from_intr
1048         CFI_ENDPROC
1049 ENDPROC(xen_hypervisor_callback)
1050 
1051 # Hypervisor uses this for application faults while it executes.
1052 # We get here for two reasons:
1053 #  1. Fault while reloading DS, ES, FS or GS
1054 #  2. Fault while executing IRET
1055 # Category 1 we fix up by reattempting the load, and zeroing the segment
1056 # register if the load fails.
1057 # Category 2 we fix up by jumping to do_iret_error. We cannot use the
1058 # normal Linux return path in this case because if we use the IRET hypercall
1059 # to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1060 # We distinguish between categories by maintaining a status value in EAX.
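# Concretely, in the code below: %eax is primed to 1; if any of the
# segment reloads at labels 1:-4: faults, its fixup (6:-9:) zeroes both
# the saved segment value and %eax before retrying, so the later
# "testl %eax,%eax" distinguishes category 1 (%eax == 0, rebuild a frame
# and take ret_from_exception) from category 2 (jump to iret_exc).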
1061 ENTRY(xen_failsafe_callback)
1062         CFI_STARTPROC
1063         pushl_cfi %eax
1064         movl $1,%eax
1065 1:      mov 4(%esp),%ds
1066 2:      mov 8(%esp),%es
1067 3:      mov 12(%esp),%fs
1068 4:      mov 16(%esp),%gs
1069         testl %eax,%eax
1070         popl_cfi %eax
1071         lea 16(%esp),%esp
1072         CFI_ADJUST_CFA_OFFSET -16
1073         jz 5f
1074         addl $16,%esp
1075         jmp iret_exc            # EAX != 0 => Category 2 (Bad IRET)
1076 5:      pushl_cfi $0            # EAX == 0 => Category 1 (Bad segment)
1077         SAVE_ALL
1078         jmp ret_from_exception
1079         CFI_ENDPROC
1080 
1081 .section .fixup,"ax"
1082 6:      xorl %eax,%eax
1083         movl %eax,4(%esp)
1084         jmp 1b
1085 7:      xorl %eax,%eax
1086         movl %eax,8(%esp)
1087         jmp 2b
1088 8:      xorl %eax,%eax
1089         movl %eax,12(%esp)
1090         jmp 3b
1091 9:      xorl %eax,%eax
1092         movl %eax,16(%esp)
1093         jmp 4b
1094 .previous
1095 .section __ex_table,"a"
1096         .align 4
1097         .long 1b,6b
1098         .long 2b,7b
1099         .long 3b,8b
1100         .long 4b,9b
1101 .previous
1102 ENDPROC(xen_failsafe_callback)
1103 
1104 BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
1105                 xen_evtchn_do_upcall)
1106 
1107 #endif  /* CONFIG_XEN */
1108 
1109 #ifdef CONFIG_FUNCTION_TRACER
1110 #ifdef CONFIG_DYNAMIC_FTRACE
1111 
1112 ENTRY(mcount)
1113         ret
1114 END(mcount)
1115 
1116 ENTRY(ftrace_caller)
1117         cmpl $0, function_trace_stop
1118         jne  ftrace_stub
1119 
1120         pushl %eax
1121         pushl %ecx
1122         pushl %edx
1123         movl 0xc(%esp), %eax
1124         movl 0x4(%ebp), %edx
1125         subl $MCOUNT_INSN_SIZE, %eax
1126 
1127 .globl ftrace_call
1128 ftrace_call:
1129         call ftrace_stub
1130 
1131         popl %edx
1132         popl %ecx
1133         popl %eax
1134 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1135 .globl ftrace_graph_call
1136 ftrace_graph_call:
1137         jmp ftrace_stub
1138 #endif
1139 
1140 .globl ftrace_stub
1141 ftrace_stub:
1142         ret
1143 END(ftrace_caller)
1144 
1145 #else /* ! CONFIG_DYNAMIC_FTRACE */
1146 
1147 ENTRY(mcount)
1148         cmpl $0, function_trace_stop
1149         jne  ftrace_stub
1150 
1151         cmpl $ftrace_stub, ftrace_trace_function
1152         jnz trace
1153 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1154         cmpl $ftrace_stub, ftrace_graph_return
1155         jnz ftrace_graph_caller
1156 
1157         cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
1158         jnz ftrace_graph_caller
1159 #endif
1160 .globl ftrace_stub
1161 ftrace_stub:
1162         ret
1163 
1164         /* taken from glibc */
1165 trace:
1166         pushl %eax
1167         pushl %ecx
1168         pushl %edx
1169         movl 0xc(%esp), %eax
1170         movl 0x4(%ebp), %edx
1171         subl $MCOUNT_INSN_SIZE, %eax
1172 
1173         call *ftrace_trace_function
1174 
1175         popl %edx
1176         popl %ecx
1177         popl %eax
1178         jmp ftrace_stub
1179 END(mcount)
1180 #endif /* CONFIG_DYNAMIC_FTRACE */
1181 #endif /* CONFIG_FUNCTION_TRACER */
1182 
1183 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1184 ENTRY(ftrace_graph_caller)
1185         cmpl $0, function_trace_stop
1186         jne ftrace_stub
1187 
1188         pushl %eax
1189         pushl %ecx
1190         pushl %edx
1191         movl 0xc(%esp), %edx
1192         lea 0x4(%ebp), %eax
1193         movl (%ebp), %ecx
1194         subl $MCOUNT_INSN_SIZE, %edx
1195         call prepare_ftrace_return
1196         popl %edx
1197         popl %ecx
1198         popl %eax
1199         ret
1200 END(ftrace_graph_caller)
1201 
1202 .globl return_to_handler
1203 return_to_handler:
1204         pushl %eax
1205         pushl %edx
1206         movl %ebp, %eax
1207         call ftrace_return_to_handler
1208         movl %eax, %ecx
1209         popl %edx
1210         popl %eax
1211         jmp *%ecx
1212 #endif
1213 
1214 .section .rodata,"a"
1215 #include "syscall_table_32.S"
1216 
1217 syscall_table_size=(.-sys_call_table)
1218 
1219 /*
1220  * Some functions should be protected against kprobes
1221  */
1222         .pushsection .kprobes.text, "ax"
1223 
1224 ENTRY(page_fault)
1225         RING0_EC_FRAME
1226         pushl_cfi $do_page_fault
1227         ALIGN
1228 error_code:
1229         /* the function address is in %gs's slot on the stack */
1230         pushl_cfi %fs
1231         /*CFI_REL_OFFSET fs, 0*/
1232         pushl_cfi %es
1233         /*CFI_REL_OFFSET es, 0*/
1234         pushl_cfi %ds
1235         /*CFI_REL_OFFSET ds, 0*/
1236         pushl_cfi %eax
1237         CFI_REL_OFFSET eax, 0
1238         pushl_cfi %ebp
1239         CFI_REL_OFFSET ebp, 0
1240         pushl_cfi %edi
1241         CFI_REL_OFFSET edi, 0
1242         pushl_cfi %esi
1243         CFI_REL_OFFSET esi, 0
1244         pushl_cfi %edx
1245         CFI_REL_OFFSET edx, 0
1246         pushl_cfi %ecx
1247         CFI_REL_OFFSET ecx, 0
1248         pushl_cfi %ebx
1249         CFI_REL_OFFSET ebx, 0
1250         cld
1251         movl $(__KERNEL_PERCPU), %ecx
1252         movl %ecx, %fs
1253         UNWIND_ESPFIX_STACK
1254         GS_TO_REG %ecx
1255         movl PT_GS(%esp), %edi          # get the function address
1256         movl PT_ORIG_EAX(%esp), %edx    # get the error code
1257         movl $-1, PT_ORIG_EAX(%esp)     # no syscall to restart
1258         REG_TO_PTGS %ecx
1259         SET_KERNEL_GS %ecx
1260         movl $(__USER_DS), %ecx
1261         movl %ecx, %ds
1262         movl %ecx, %es
1263         TRACE_IRQS_OFF
1264         movl %esp,%eax                  # pt_regs pointer
1265         call *%edi
1266         jmp ret_from_exception
1267         CFI_ENDPROC
1268 END(page_fault)
1269 
1270 /*
1271  * Debug traps and NMI can happen at the one SYSENTER instruction
1272  * that sets up the real kernel stack. Check here, since we can't
1273  * allow the wrong stack to be used.
1274  *
1275  * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
1276  * already pushed 3 words if it hits on the sysenter instruction:
1277  * eflags, cs and eip.
1278  *
1279  * We just load the right stack, and push the three (known) values
1280  * by hand onto the new stack - while updating the return eip past
1281  * the instruction that would have done it for sysenter.
1282  */
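/*
 * Sketch of the situation the macro below handles: at the cmpw, the
 * stack holds the three CPU-pushed words (eip at 0(%esp), cs at
 * 4(%esp), eflags at 8(%esp)), which is why callers pass an offset of
 * 12 (or 24 when a debug frame sits under the NMI frame) to reach
 * TSS_sysenter_sp0 relative to the pre-exception %esp.
 */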
1283 .macro FIX_STACK offset ok label
1284         cmpw $__KERNEL_CS, 4(%esp)
1285         jne \ok
1286 \label:
1287         movl TSS_sysenter_sp0 + \offset(%esp), %esp
1288         CFI_DEF_CFA esp, 0
1289         CFI_UNDEFINED eip
1290         pushfl_cfi
1291         pushl_cfi $__KERNEL_CS
1292         pushl_cfi $sysenter_past_esp
1293         CFI_REL_OFFSET eip, 0
1294 .endm
1295 
1296 ENTRY(debug)
1297         RING0_INT_FRAME
1298         cmpl $ia32_sysenter_target,(%esp)
1299         jne debug_stack_correct
1300         FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
1301 debug_stack_correct:
1302         pushl_cfi $-1                   # mark this as an int
1303         SAVE_ALL
1304         TRACE_IRQS_OFF
1305         xorl %edx,%edx                  # error code 0
1306         movl %esp,%eax                  # pt_regs pointer
1307         call do_debug
1308         jmp ret_from_exception
1309         CFI_ENDPROC
1310 END(debug)
1311 
1312 /*
1313  * NMI is doubly nasty. It can happen _while_ we're handling
1314  * a debug fault, and the debug fault hasn't yet been able to
1315  * clear up the stack. So we first check whether we got an
1316  * NMI on the sysenter entry path, but after that we need to
1317  * check whether we got an NMI on the debug path where the debug
1318  * fault happened on the sysenter path.
1319  */
1320 ENTRY(nmi)
1321         RING0_INT_FRAME
1322         pushl_cfi %eax
1323         movl %ss, %eax
1324         cmpw $__ESPFIX_SS, %ax
1325         popl_cfi %eax
1326         je nmi_espfix_stack
1327         cmpl $ia32_sysenter_target,(%esp)
1328         je nmi_stack_fixup
1329         pushl_cfi %eax
1330         movl %esp,%eax
1331         /* Do not access memory above the end of our stack page;
1332          * it might not exist.
1333          */
1334         andl $(THREAD_SIZE-1),%eax
1335         cmpl $(THREAD_SIZE-20),%eax
1336         popl_cfi %eax
1337         jae nmi_stack_correct
1338         cmpl $ia32_sysenter_target,12(%esp)
1339         je nmi_debug_stack_check
1340 nmi_stack_correct:
1341         /* We have a RING0_INT_FRAME here */
1342         pushl_cfi %eax
1343         SAVE_ALL
1344         xorl %edx,%edx          # zero error code
1345         movl %esp,%eax          # pt_regs pointer
1346         call do_nmi
1347         jmp restore_all_notrace
1348         CFI_ENDPROC
1349 
1350 nmi_stack_fixup:
1351         RING0_INT_FRAME
1352         FIX_STACK 12, nmi_stack_correct, 1
1353         jmp nmi_stack_correct
1354 
1355 nmi_debug_stack_check:
1356         /* We have a RING0_INT_FRAME here */
1357         cmpw $__KERNEL_CS,16(%esp)
1358         jne nmi_stack_correct
1359         cmpl $debug,(%esp)
1360         jb nmi_stack_correct
1361         cmpl $debug_esp_fix_insn,(%esp)
1362         ja nmi_stack_correct
1363         FIX_STACK 24, nmi_stack_correct, 1
1364         jmp nmi_stack_correct
1365 
1366 nmi_espfix_stack:
1367         /* We have a RING0_INT_FRAME here.
1368          *
1369          * create the ss:esp pointer used to lss back to the espfix stack
1370          */
1371         pushl_cfi %ss
1372         pushl_cfi %esp
1373         addl $4, (%esp)
1374         /* copy the iret frame of 12 bytes */
1375         .rept 3
1376         pushl_cfi 16(%esp)
1377         .endr
1378         pushl_cfi %eax
1379         SAVE_ALL
1380         FIXUP_ESPFIX_STACK              # %eax == %esp
1381         xorl %edx,%edx                  # zero error code
1382         call do_nmi
1383         RESTORE_REGS
1384         lss 12+4(%esp), %esp            # back to espfix stack
1385         CFI_ADJUST_CFA_OFFSET -24
1386         jmp irq_return
1387         CFI_ENDPROC
1388 END(nmi)
1389 
1390 ENTRY(int3)
1391         RING0_INT_FRAME
1392         pushl_cfi $-1                   # mark this as an int
1393         SAVE_ALL
1394         TRACE_IRQS_OFF
1395         xorl %edx,%edx          # zero error code
1396         movl %esp,%eax          # pt_regs pointer
1397         call do_int3
1398         jmp ret_from_exception
1399         CFI_ENDPROC
1400 END(int3)
1401 
1402 ENTRY(general_protection)
1403         RING0_EC_FRAME
1404         pushl_cfi $do_general_protection
1405         jmp error_code
1406         CFI_ENDPROC
1407 END(general_protection)
1408 
1409 #ifdef CONFIG_KVM_GUEST
1410 ENTRY(async_page_fault)
1411         RING0_EC_FRAME
1412         pushl $do_async_page_fault
1413         CFI_ADJUST_CFA_OFFSET 4
1414         jmp error_code
1415         CFI_ENDPROC
1416 END(async_page_fault)
1417 #endif
1418 
1419 /*
1420  * End of kprobes section
1421  */
1422         .popsection
