Linux/arch/x86/kernel/entry_64.S

/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals, or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 * at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like a partial stack frame, but with all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 * backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers.
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 * There are unfortunately lots of special cases where some registers are
 * not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 * Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 * frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
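/*
 * For reference, the architecture-defined "top of stack" mentioned above
 * is the frame the CPU pushes on an interrupt (a sketch, higher to lower
 * addresses; the actual offsets come from asm-offsets.h):
 *
 *      SS      return stack segment
 *      RSP     return stack pointer
 *      RFLAGS  saved flags
 *      CS      return code segment
 *      RIP     return instruction pointer
 *
 * so RIP sits at the lowest address of the hardware frame.
 */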

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>
#include <asm/percpu.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64       (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT 0x80000000
#define __AUDIT_ARCH_LE    0x40000000
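/*
 * Worked out: EM_X86_64 is 62 (0x3e), so AUDIT_ARCH_X86_64 evaluates to
 * 0x3e | 0x80000000 | 0x40000000 = 0xc000003e, i.e. "64-bit,
 * little-endian, x86-64" as the audit subsystem encodes it.
 */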

        .code64
        .section .entry.text, "ax"

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
        retq
END(mcount)

ENTRY(ftrace_caller)
        cmpl $0, function_trace_stop
        jne  ftrace_stub

        MCOUNT_SAVE_FRAME

        movq 0x38(%rsp), %rdi
        movq 8(%rbp), %rsi
        subq $MCOUNT_INSN_SIZE, %rdi

GLOBAL(ftrace_call)
        call ftrace_stub

        MCOUNT_RESTORE_FRAME

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)
        jmp ftrace_stub
#endif

GLOBAL(ftrace_stub)
        retq
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
        cmpl $0, function_trace_stop
        jne  ftrace_stub

        cmpq $ftrace_stub, ftrace_trace_function
        jnz trace

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        cmpq $ftrace_stub, ftrace_graph_return
        jnz ftrace_graph_caller

        cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
        jnz ftrace_graph_caller
#endif

GLOBAL(ftrace_stub)
        retq

trace:
        MCOUNT_SAVE_FRAME

        movq 0x38(%rsp), %rdi
        movq 8(%rbp), %rsi
        subq $MCOUNT_INSN_SIZE, %rdi

        call   *ftrace_trace_function

        MCOUNT_RESTORE_FRAME

        jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
        cmpl $0, function_trace_stop
        jne ftrace_stub

        MCOUNT_SAVE_FRAME

        leaq 8(%rbp), %rdi
        movq 0x38(%rsp), %rsi
        movq (%rbp), %rdx
        subq $MCOUNT_INSN_SIZE, %rsi

        call    prepare_ftrace_return

        MCOUNT_RESTORE_FRAME

        retq
END(ftrace_graph_caller)

GLOBAL(return_to_handler)
        subq  $24, %rsp

        /* Save the return values */
        movq %rax, (%rsp)
        movq %rdx, 8(%rsp)
        movq %rbp, %rdi

        call ftrace_return_to_handler

        movq %rax, %rdi
        movq 8(%rsp), %rdx
        movq (%rsp), %rax
        addq $24, %rsp
        jmp *%rdi
#endif


#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
        swapgs
        sysretq
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */


.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
        bt   $9,EFLAGS-\offset(%rsp)    /* interrupts off? */
        jnc  1f
        TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL-based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

        /* %rsp:at FRAMEEND */
        .macro FIXUP_TOP_OF_STACK tmp offset=0
        movq PER_CPU_VAR(old_rsp),\tmp
        movq \tmp,RSP+\offset(%rsp)
        movq $__USER_DS,SS+\offset(%rsp)
        movq $__USER_CS,CS+\offset(%rsp)
        movq $-1,RCX+\offset(%rsp)
        movq R11+\offset(%rsp),\tmp  /* get eflags */
        movq \tmp,EFLAGS+\offset(%rsp)
        .endm
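/*
 * Why these particular fixups: SYSCALL stashes the user RIP in %rcx and
 * the user RFLAGS in %r11 and switches stacks without pushing anything,
 * so the SS/RSP/CS/EFLAGS slots of pt_regs contain garbage until filled
 * in from known values here. The original %rcx is unrecoverable (SYSCALL
 * overwrote it), hence the -1 placeholder in the RCX slot.
 */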

        .macro RESTORE_TOP_OF_STACK tmp offset=0
        movq RSP+\offset(%rsp),\tmp
        movq \tmp,PER_CPU_VAR(old_rsp)
        movq EFLAGS+\offset(%rsp),\tmp
        movq \tmp,R11+\offset(%rsp)
        .endm

        .macro FAKE_STACK_FRAME child_rip
        /* push in order ss, rsp, eflags, cs, rip */
        xorl %eax, %eax
        pushq_cfi $__KERNEL_DS /* ss */
        /*CFI_REL_OFFSET        ss,0*/
        pushq_cfi %rax /* rsp */
        CFI_REL_OFFSET  rsp,0
        pushq_cfi $X86_EFLAGS_IF /* eflags - interrupts on */
        /*CFI_REL_OFFSET        rflags,0*/
        pushq_cfi $__KERNEL_CS /* cs */
        /*CFI_REL_OFFSET        cs,0*/
        pushq_cfi \child_rip /* rip */
        CFI_REL_OFFSET  rip,0
        pushq_cfi %rax /* orig rax */
        .endm

        .macro UNFAKE_STACK_FRAME
        addq $8*6, %rsp
        CFI_ADJUST_CFA_OFFSET   -(6*8)
        .endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
        .macro EMPTY_FRAME start=1 offset=0
        .if \start
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,8+\offset
        .else
        CFI_DEF_CFA_OFFSET 8+\offset
        .endif
        .endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
        .macro INTR_FRAME start=1 offset=0
        EMPTY_FRAME \start, SS+8+\offset-RIP
        /*CFI_REL_OFFSET ss, SS+\offset-RIP*/
        CFI_REL_OFFSET rsp, RSP+\offset-RIP
        /*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
        /*CFI_REL_OFFSET cs, CS+\offset-RIP*/
        CFI_REL_OFFSET rip, RIP+\offset-RIP
        .endm

/*
 * initial frame state for exceptions with error code (and interrupts
 * with vector already pushed)
 */
        .macro XCPT_FRAME start=1 offset=0
        INTR_FRAME \start, RIP+\offset-ORIG_RAX
        /*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
        .endm

/*
 * frame that enables calling into C.
 */
        .macro PARTIAL_FRAME start=1 offset=0
        XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
        CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
        CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
        CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
        CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
        CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
        CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
        CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
        CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
        CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
        .endm

/*
 * frame that enables passing a complete pt_regs to a C function.
 */
        .macro DEFAULT_FRAME start=1 offset=0
        PARTIAL_FRAME \start, R11+\offset-R15
        CFI_REL_OFFSET rbx, RBX+\offset
        CFI_REL_OFFSET rbp, RBP+\offset
        CFI_REL_OFFSET r12, R12+\offset
        CFI_REL_OFFSET r13, R13+\offset
        CFI_REL_OFFSET r14, R14+\offset
        CFI_REL_OFFSET r15, R15+\offset
        .endm

/* save partial stack frame */
        .pushsection .kprobes.text, "ax"
ENTRY(save_args)
        XCPT_FRAME
        cld
        /*
         * start from rbp in pt_regs and jump over
         * return address.
         */
        movq_cfi rdi, RDI+8-RBP
        movq_cfi rsi, RSI+8-RBP
        movq_cfi rdx, RDX+8-RBP
        movq_cfi rcx, RCX+8-RBP
        movq_cfi rax, RAX+8-RBP
        movq_cfi  r8,  R8+8-RBP
        movq_cfi  r9,  R9+8-RBP
        movq_cfi r10, R10+8-RBP
        movq_cfi r11, R11+8-RBP

        leaq -RBP+8(%rsp),%rdi  /* arg1 for handler */
        movq_cfi rbp, 8         /* push %rbp */
        leaq 8(%rsp), %rbp              /* mov %rsp, %rbp */
        testl $3, CS(%rdi)
        je 1f
        SWAPGS
        /*
         * irq_count is used to check if a CPU is already on an interrupt stack
         * or not. While this is essentially redundant with preempt_count it is
         * a little cheaper to use a separate counter in the PDA (short of
         * moving irq_enter into assembly, which would be too much work)
         */
1:      incl PER_CPU_VAR(irq_count)
        jne 2f
        popq_cfi %rax                   /* move return address... */
        mov PER_CPU_VAR(irq_stack_ptr),%rsp
        EMPTY_FRAME 0
        pushq_cfi %rbp                  /* backlink for unwinder */
        pushq_cfi %rax                  /* ... to the new stack */
        /*
         * We entered an interrupt context - irqs are off:
         */
2:      TRACE_IRQS_OFF
        ret
        CFI_ENDPROC
END(save_args)
        .popsection

ENTRY(save_rest)
        PARTIAL_FRAME 1 REST_SKIP+8
        movq 5*8+16(%rsp), %r11 /* save return address */
        movq_cfi rbx, RBX+16
        movq_cfi rbp, RBP+16
        movq_cfi r12, R12+16
        movq_cfi r13, R13+16
        movq_cfi r14, R14+16
        movq_cfi r15, R15+16
        movq %r11, 8(%rsp)      /* return address */
        FIXUP_TOP_OF_STACK %r11, 16
        ret
        CFI_ENDPROC
END(save_rest)

/* save complete stack frame */
        .pushsection .kprobes.text, "ax"
ENTRY(save_paranoid)
        XCPT_FRAME 1 RDI+8
        cld
        movq_cfi rdi, RDI+8
        movq_cfi rsi, RSI+8
        movq_cfi rdx, RDX+8
        movq_cfi rcx, RCX+8
        movq_cfi rax, RAX+8
        movq_cfi r8, R8+8
        movq_cfi r9, R9+8
        movq_cfi r10, R10+8
        movq_cfi r11, R11+8
        movq_cfi rbx, RBX+8
        movq_cfi rbp, RBP+8
        movq_cfi r12, R12+8
        movq_cfi r13, R13+8
        movq_cfi r14, R14+8
        movq_cfi r15, R15+8
        movl $1,%ebx
        movl $MSR_GS_BASE,%ecx
        rdmsr
        testl %edx,%edx
        js 1f   /* negative -> in kernel */
        SWAPGS
        xorl %ebx,%ebx
1:      ret
        CFI_ENDPROC
END(save_paranoid)
        .popsection

/*
 * A newly forked process directly context switches into this address.
 *
 * rdi: prev task we switched from
 */
ENTRY(ret_from_fork)
        DEFAULT_FRAME

        LOCK ; btr $TIF_FORK,TI_flags(%r8)

        pushq_cfi kernel_eflags(%rip)
        popfq_cfi                               # reset kernel eflags

        call schedule_tail                      # rdi: 'prev' task parameter

        GET_THREAD_INFO(%rcx)

        RESTORE_REST

        testl $3, CS-ARGOFFSET(%rsp)            # from kernel_thread?
        je   int_ret_from_sys_call

        testl $_TIF_IA32, TI_flags(%rcx)        # 32-bit compat task needs IRET
        jnz  int_ret_from_sys_call

        RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
        jmp ret_from_sys_call                   # go to the SYSRET fastpath

        CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3    (--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX  if we had a free scratch register we could save the RSP into the stack frame
 *      and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frames, always force IRET. That is because
 * IRET deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
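/*
 * A rough C-level sketch of the fast-path dispatch below (illustrative
 * only; "dispatch" and the typedef are made-up names, not kernel symbols):
 *
 *      typedef long (*sys_call_ptr_t)(long, long, long, long, long, long);
 *      extern const sys_call_ptr_t sys_call_table[];
 *
 *      long dispatch(unsigned long nr, long a0, long a1, long a2,
 *                    long a3, long a4, long a5)
 *      {
 *              if (nr > __NR_syscall_max)
 *                      return -ENOSYS;         // "badsys"
 *              return sys_call_table[nr](a0, a1, a2, a3, a4, a5);
 *      }
 *
 * The %r10 -> %rcx shuffle exists because SYSCALL itself clobbers %rcx
 * with the return RIP, so user space passes arg3 in %r10 while the C ABI
 * expects the fourth argument in %rcx.
 */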

ENTRY(system_call)
        CFI_STARTPROC   simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA     rsp,KERNEL_STACK_OFFSET
        CFI_REGISTER    rip,rcx
        /*CFI_REGISTER  rflags,r11*/
        SWAPGS_UNSAFE_STACK
        /*
         * A hypervisor implementation might want to use a label
         * after the swapgs, so that it can do the swapgs
         * for the guest and jump here on syscall.
         */
ENTRY(system_call_after_swapgs)

        movq    %rsp,PER_CPU_VAR(old_rsp)
        movq    PER_CPU_VAR(kernel_stack),%rsp
        /*
         * No need to follow this irqs off/on section - it's straight
         * and short:
         */
        ENABLE_INTERRUPTS(CLBR_NONE)
        SAVE_ARGS 8,1
        movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
        movq  %rcx,RIP-ARGOFFSET(%rsp)
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
        GET_THREAD_INFO(%rcx)
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
        jnz tracesys
system_call_fastpath:
        cmpq $__NR_syscall_max,%rax
        ja badsys
        movq %r10,%rcx
        call *sys_call_table(,%rax,8)  # XXX:    rip relative
        movq %rax,RAX-ARGOFFSET(%rsp)
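        /*
         * The indirect call above is sys_call_table[%rax]: the table is an
         * array of 8-byte function pointers, hence the ,8 scale factor. The
         * return value is stored into the RAX slot of the (partial) pt_regs
         * so the exit path can deliver it to user space.
         */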
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: flagmask */
sysret_check:
        LOCKDEP_SYS_EXIT
        GET_THREAD_INFO(%rcx)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        movl TI_flags(%rcx),%edx
        andl %edi,%edx
        jnz  sysret_careful
        CFI_REMEMBER_STATE
        /*
         * sysretq will re-enable interrupts:
         */
        TRACE_IRQS_ON
        movq RIP-ARGOFFSET(%rsp),%rcx
        CFI_REGISTER    rip,rcx
        RESTORE_ARGS 0,-ARG_SKIP,1
        /*CFI_REGISTER  rflags,r11*/
        movq    PER_CPU_VAR(old_rsp), %rsp
        USERGS_SYSRET64

        CFI_RESTORE_STATE
        /* Handle reschedules */
        /* edx: work, edi: workmask */
sysret_careful:
        bt $TIF_NEED_RESCHED,%edx
        jnc sysret_signal
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        pushq_cfi %rdi
        call schedule
        popq_cfi %rdi
        jmp sysret_check

        /* Handle a signal */
sysret_signal:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
        bt $TIF_SYSCALL_AUDIT,%edx
        jc sysret_audit
#endif
        /*
         * We have a signal, or exit tracing or single-step.
         * These all wind up with the iret return path anyway,
         * so just join that path right now.
         */
        FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
        jmp int_check_syscall_exit_work

badsys:
        movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
        jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
        /*
         * Fast path for syscall audit without full syscall trace.
         * We just call audit_syscall_entry() directly, and then
         * jump back to the normal fast path.
         */
auditsys:
        movq %r10,%r9                   /* 6th arg: 4th syscall arg */
        movq %rdx,%r8                   /* 5th arg: 3rd syscall arg */
        movq %rsi,%rcx                  /* 4th arg: 2nd syscall arg */
        movq %rdi,%rdx                  /* 3rd arg: 1st syscall arg */
        movq %rax,%rsi                  /* 2nd arg: syscall number */
        movl $AUDIT_ARCH_X86_64,%edi    /* 1st arg: audit arch */
        call audit_syscall_entry
        LOAD_ARGS 0             /* reload call-clobbered registers */
        jmp system_call_fastpath

        /*
         * Return fast path for syscall audit.  Call audit_syscall_exit()
         * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
         * masked off.
         */
sysret_audit:
        movq RAX-ARGOFFSET(%rsp),%rsi   /* second arg, syscall return value */
        cmpq $0,%rsi            /* is it < 0? */
        setl %al                /* 1 if so, 0 if not */
        movzbl %al,%edi         /* zero-extend that into %edi */
        inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
        call audit_syscall_exit
        movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
        jmp sysret_check
#endif  /* CONFIG_AUDITSYSCALL */

        /* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
        testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
        jz auditsys
#endif
        SAVE_REST
        movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
        FIXUP_TOP_OF_STACK %rdi
        movq %rsp,%rdi
        call syscall_trace_enter
        /*
         * Reload arg registers from stack in case ptrace changed them.
         * We don't reload %rax because syscall_trace_enter() returned
         * the value it wants us to use in the table lookup.
         */
        LOAD_ARGS ARGOFFSET, 1
        RESTORE_REST
        cmpq $__NR_syscall_max,%rax
        ja   int_ret_from_sys_call      /* RAX(%rsp) set to -ENOSYS above */
        movq %r10,%rcx  /* fixup for C */
        call *sys_call_table(,%rax,8)
        movq %rax,RAX-ARGOFFSET(%rsp)
        /* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
GLOBAL(int_ret_from_sys_call)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_restore_args
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: mask to check */
GLOBAL(int_with_check)
        LOCKDEP_SYS_EXIT_IRQ
        GET_THREAD_INFO(%rcx)
        movl TI_flags(%rcx),%edx
        andl %edi,%edx
        jnz   int_careful
        andl    $~TS_COMPAT,TI_status(%rcx)
        jmp   retint_swapgs

        /* Either reschedule or signal or syscall exit tracking needed. */
        /* First do a reschedule test. */
        /* edx: work, edi: workmask */
int_careful:
        bt $TIF_NEED_RESCHED,%edx
        jnc  int_very_careful
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        pushq_cfi %rdi
        call schedule
        popq_cfi %rdi
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp int_with_check

        /* handle signals and tracing -- both require a full stack frame */
int_very_careful:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
int_check_syscall_exit_work:
        SAVE_REST
        /* Check for syscall exit trace */
        testl $_TIF_WORK_SYSCALL_EXIT,%edx
        jz int_signal
        pushq_cfi %rdi
        leaq 8(%rsp),%rdi       # &ptregs -> arg1
        call syscall_trace_leave
        popq_cfi %rdi
        andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
        jmp int_restore_rest

int_signal:
        testl $_TIF_DO_NOTIFY_MASK,%edx
        jz 1f
        movq %rsp,%rdi          # &ptregs -> arg1
        xorl %esi,%esi          # oldset -> arg2
        call do_notify_resume
1:      movl $_TIF_WORK_MASK,%edi
int_restore_rest:
        RESTORE_REST
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp int_with_check
        CFI_ENDPROC
END(system_call)

/*
 * Certain special system calls need to save a complete full stack frame.
 */
        .macro PTREGSCALL label,func,arg
ENTRY(\label)
        PARTIAL_FRAME 1 8               /* offset 8: return address */
        subq $REST_SKIP, %rsp
        CFI_ADJUST_CFA_OFFSET REST_SKIP
        call save_rest
        DEFAULT_FRAME 0 8               /* offset 8: return address */
        leaq 8(%rsp), \arg      /* pt_regs pointer */
        call \func
        jmp ptregscall_common
        CFI_ENDPROC
END(\label)
        .endm

        PTREGSCALL stub_clone, sys_clone, %r8
        PTREGSCALL stub_fork, sys_fork, %rdi
        PTREGSCALL stub_vfork, sys_vfork, %rdi
        PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
        PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
        DEFAULT_FRAME 1 8       /* offset 8: return address */
        RESTORE_TOP_OF_STACK %r11, 8
        movq_cfi_restore R15+8, r15
        movq_cfi_restore R14+8, r14
        movq_cfi_restore R13+8, r13
        movq_cfi_restore R12+8, r12
        movq_cfi_restore RBP+8, rbp
        movq_cfi_restore RBX+8, rbx
        ret $REST_SKIP          /* pop extended registers */
        CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
        CFI_STARTPROC
        addq $8, %rsp
        PARTIAL_FRAME 0
        SAVE_REST
        FIXUP_TOP_OF_STACK %r11
        movq %rsp, %rcx
        call sys_execve
        RESTORE_TOP_OF_STACK %r11
        movq %rax,RAX(%rsp)
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
        CFI_STARTPROC
        addq $8, %rsp
        PARTIAL_FRAME 0
        SAVE_REST
        movq %rsp,%rdi
        FIXUP_TOP_OF_STACK %r11
        call sys_rt_sigreturn
        movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
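/*
 * The arithmetic behind "7 stubs per 32 bytes": each stub below is a
 * 2-byte "pushq $imm8" (this is why the pushed value is kept in signed
 * byte range) plus a 2-byte short jmp, except the last stub of a chunk,
 * which falls through to the 5-byte "jmp common_interrupt"; that makes
 * 6*4 + 2 + 5 = 31 bytes per .balign 32 chunk.
 */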
        .section .init.rodata,"a"
ENTRY(interrupt)
        .section .entry.text
        .p2align 5
        .p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
        INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
        .balign 32
  .rept 7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
        CFI_ADJUST_CFA_OFFSET -8
      .endif
1:      pushq_cfi $(~vector+0x80)       /* Note: always in signed byte range */
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
        jmp 2f
      .endif
      .previous
        .quad 1b
      .section .entry.text
vector=vector+1
    .endif
  .endr
2:      jmp common_interrupt
.endr
        CFI_ENDPROC
END(irq_entries_start)

.previous
END(interrupt)
.previous

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
        .macro interrupt func
        /* reserve pt_regs for scratch regs and rbp */
        subq $ORIG_RAX-RBP, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
        call save_args
        PARTIAL_FRAME 0
        call \func
        .endm

/*
 * Interrupt entry/exit should be protected against kprobes
 */
        .pushsection .kprobes.text, "ax"
        /*
         * The interrupt stubs push (~vector+0x80) onto the stack and
         * then jump to common_interrupt.
         */
        .p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
        XCPT_FRAME
        addq $-0x80,(%rsp)              /* Adjust vector to [-256,-1] range */
        interrupt do_IRQ
        /* 0(%rsp): old_rsp-ARGOFFSET */
ret_from_intr:
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        decl PER_CPU_VAR(irq_count)
        leaveq

        CFI_RESTORE             rbp
        CFI_DEF_CFA_REGISTER    rsp
        CFI_ADJUST_CFA_OFFSET   -8

        /* we did not save rbx, restore only from ARGOFFSET */
        addq $8, %rsp
        CFI_ADJUST_CFA_OFFSET   -8
exit_intr:
        GET_THREAD_INFO(%rcx)
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_kernel

        /* Interrupt came from user space */
        /*
         * Has a correct top of stack, but a partial stack frame
         * %rcx: thread info. Interrupts off.
         */
retint_with_reschedule:
        movl $_TIF_WORK_MASK,%edi
retint_check:
        LOCKDEP_SYS_EXIT_IRQ
        movl TI_flags(%rcx),%edx
        andl %edi,%edx
        CFI_REMEMBER_STATE
        jnz  retint_careful

retint_swapgs:          /* return to user-space */
        /*
         * The iretq could re-enable interrupts:
         */
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_IRETQ
        SWAPGS
        jmp restore_args

retint_restore_args:    /* return to kernel space */
        DISABLE_INTERRUPTS(CLBR_ANY)
        /*
         * The iretq could re-enable interrupts:
         */
        TRACE_IRQS_IRETQ
restore_args:
        RESTORE_ARGS 0,8,0

irq_return:
        INTERRUPT_RETURN

        .section __ex_table, "a"
        .quad irq_return, bad_iret
        .previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
        iretq

        .section __ex_table,"a"
        .quad native_iret, bad_iret
        .previous
#endif

        .section .fixup,"ax"
bad_iret:
        /*
         * The iret traps when the %cs or %ss being restored is bogus.
         * We've lost the original trap vector and error code.
         * #GPF is the most likely one to get for an invalid selector.
         * So pretend we completed the iret and took the #GPF in user mode.
         *
         * We are now running with the kernel GS after exception recovery.
         * But error_entry expects us to have user GS to match the user %cs,
         * so swap back.
         */
        pushq $0

        SWAPGS
        jmp general_protection

        .previous

        /* edi: workmask, edx: work */
retint_careful:
        CFI_RESTORE_STATE
        bt    $TIF_NEED_RESCHED,%edx
        jnc   retint_signal
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        pushq_cfi %rdi
        call  schedule
        popq_cfi %rdi
        GET_THREAD_INFO(%rcx)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp retint_check

retint_signal:
        testl $_TIF_DO_NOTIFY_MASK,%edx
        jz    retint_swapgs
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        SAVE_REST
        movq $-1,ORIG_RAX(%rsp)
        xorl %esi,%esi          # oldset
        movq %rsp,%rdi          # &pt_regs
        call do_notify_resume
        RESTORE_REST
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        GET_THREAD_INFO(%rcx)
        jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
        /* Returning to kernel space. Check if we need preemption */
        /* rcx:  threadinfo. interrupts off. */
ENTRY(retint_kernel)
        cmpl $0,TI_preempt_count(%rcx)
        jnz  retint_restore_args
        bt  $TIF_NEED_RESCHED,TI_flags(%rcx)
        jnc  retint_restore_args
        bt   $9,EFLAGS-ARGOFFSET(%rsp)  /* interrupts off? */
        jnc  retint_restore_args
        call preempt_schedule_irq
        jmp exit_intr
#endif

        CFI_ENDPROC
END(common_interrupt)
/*
 * End of kprobes section
 */
        .popsection

/*
 * APIC interrupts.
 */
.macro apicinterrupt num sym do_sym
ENTRY(\sym)
        INTR_FRAME
        pushq_cfi $~(\num)
        interrupt \do_sym
        jmp ret_from_intr
        CFI_ENDPROC
END(\sym)
.endm
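/*
 * Note the $~(\num) push: the stubs store the bitwise NOT of the vector
 * in the orig_ax slot, so a handler can recover it as ~regs->orig_ax.
 * Keeping the value negative also makes it distinguishable from a
 * syscall number, which occupies orig_ax on the syscall path.
 */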

#ifdef CONFIG_SMP
apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
        irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
apicinterrupt REBOOT_VECTOR \
        reboot_interrupt smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt UV_BAU_MESSAGE \
        uv_bau_message_intr1 uv_bau_message_interrupt
#endif
apicinterrupt LOCAL_TIMER_VECTOR \
        apic_timer_interrupt smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR \
        x86_platform_ipi smp_x86_platform_ipi

#ifdef CONFIG_SMP
.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
        16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
.if NUM_INVALIDATE_TLB_VECTORS > \idx
apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \
        invalidate_interrupt\idx smp_invalidate_interrupt
.endif
.endr
#endif

apicinterrupt THRESHOLD_APIC_VECTOR \
        threshold_interrupt smp_threshold_interrupt
apicinterrupt THERMAL_APIC_VECTOR \
        thermal_interrupt smp_thermal_interrupt

#ifdef CONFIG_X86_MCE
apicinterrupt MCE_SELF_VECTOR \
        mce_self_interrupt smp_mce_self_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
        call_function_single_interrupt smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR \
        call_function_interrupt smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR \
        reschedule_interrupt smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR \
        error_interrupt smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR \
        spurious_interrupt smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR \
        irq_work_interrupt smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
.macro zeroentry sym do_sym
ENTRY(\sym)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call error_entry
        DEFAULT_FRAME 0
        movq %rsp,%rdi          /* pt_regs pointer */
        xorl %esi,%esi          /* no error code */
        call \do_sym
        jmp error_exit          /* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm

.macro paranoidzeroentry sym do_sym
ENTRY(\sym)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call save_paranoid
        TRACE_IRQS_OFF
        movq %rsp,%rdi          /* pt_regs pointer */
        xorl %esi,%esi          /* no error code */
        call \do_sym
        jmp paranoid_exit       /* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm

#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call save_paranoid
        TRACE_IRQS_OFF
        movq %rsp,%rdi          /* pt_regs pointer */
        xorl %esi,%esi          /* no error code */
        subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
        call \do_sym
        addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
        jmp paranoid_exit       /* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm
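/*
 * The sub/add pair around \do_sym above is what makes limited nesting of
 * IST exceptions survivable: lowering the IST pointer in the TSS by
 * EXCEPTION_STKSZ for the duration of the handler means a second
 * exception of the same kind (e.g. a #DB raised inside do_debug) gets a
 * fresh stack region instead of clobbering the frame currently in use.
 */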

.macro errorentry sym do_sym
ENTRY(\sym)
        XCPT_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call error_entry
        DEFAULT_FRAME 0
        movq %rsp,%rdi                  /* pt_regs pointer */
        movq ORIG_RAX(%rsp),%rsi        /* get error code */
        movq $-1,ORIG_RAX(%rsp)         /* no syscall to restart */
        call \do_sym
        jmp error_exit                  /* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm

        /* error code is on the stack already */
.macro paranoiderrorentry sym do_sym
ENTRY(\sym)
        XCPT_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call save_paranoid
        DEFAULT_FRAME 0
        TRACE_IRQS_OFF
        movq %rsp,%rdi                  /* pt_regs pointer */
        movq ORIG_RAX(%rsp),%rsi        /* get error code */
        movq $-1,ORIG_RAX(%rsp)         /* no syscall to restart */
        call \do_sym
        jmp paranoid_exit               /* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm
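/*
 * Summary of the entry macro flavors defined above:
 * - zeroentry:           no CPU error code; -1 is pushed as orig_rax.
 * - errorentry:          the CPU pushed an error code; it is passed to
 *                        the C handler as the second argument.
 * - paranoidzeroentry*:  like zeroentry, but via save_paranoid, which
 *                        decides about swapgs by reading MSR_GS_BASE
 *                        instead of trusting the saved CS.
 * - paranoiderrorentry:  the paranoid variant for error-code exceptions.
 */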

zeroentry divide_error do_divide_error
zeroentry overflow do_overflow
zeroentry bounds do_bounds
zeroentry invalid_op do_invalid_op
zeroentry device_not_available do_device_not_available
paranoiderrorentry double_fault do_double_fault
zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
errorentry invalid_TSS do_invalid_TSS
errorentry segment_not_present do_segment_not_present
zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
zeroentry coprocessor_error do_coprocessor_error
errorentry alignment_check do_alignment_check
zeroentry simd_coprocessor_error do_simd_coprocessor_error

        /* Reload gs selector with exception handling */
        /* edi:  new selector */
ENTRY(native_load_gs_index)
        CFI_STARTPROC
        pushfq_cfi
        DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
        SWAPGS
gs_change:
        movl %edi,%gs
2:      mfence          /* workaround */
        SWAPGS
        popfq_cfi
        ret
        CFI_ENDPROC
END(native_load_gs_index)

        .section __ex_table,"a"
        .align 8
        .quad gs_change,bad_gs
        .previous
        .section .fixup,"ax"
        /* running with kernelgs */
bad_gs:
        SWAPGS                  /* switch back to user gs */
        xorl %eax,%eax
        movl %eax,%gs
        jmp  2b
        .previous
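/*
 * The __ex_table entry above follows the usual fixup scheme: a pair of
 * absolute addresses, (faulting insn, fixup target), matching the two
 * .quad values emitted. Roughly (a sketch of the layout implied by this
 * file, not a header excerpt):
 *
 *      struct exception_table_entry {
 *              unsigned long insn;     // address that may fault
 *              unsigned long fixup;    // where to resume instead
 *      };
 *
 * so a fault on the "movl %edi,%gs" at gs_change resumes at bad_gs,
 * which swaps back, loads a null selector, and rejoins the exit path.
 */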

ENTRY(kernel_thread_helper)
        pushq $0                # fake return address
        CFI_STARTPROC
        /*
         * Here we are in the child and the registers are set as they were
         * at kernel_thread() invocation in the parent.
         */
        call *%rsi
        # exit
        mov %eax, %edi
        call do_exit
        ud2                     # padding for call trace
        CFI_ENDPROC
END(kernel_thread_helper)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *       extern long execve(const char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *      rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *      extern long sys_execve(const char *name, char **argv,char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *      rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
        CFI_STARTPROC
        FAKE_STACK_FRAME $0
        SAVE_ALL
        movq %rsp,%rcx
        call sys_execve
        movq %rax, RAX(%rsp)
        RESTORE_REST
        testq %rax,%rax
        je int_ret_from_sys_call
        RESTORE_ARGS
        UNFAKE_STACK_FRAME
        ret
        CFI_ENDPROC
END(kernel_execve)

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
        CFI_STARTPROC
        pushq_cfi %rbp
        CFI_REL_OFFSET rbp,0
        mov  %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
        incl PER_CPU_VAR(irq_count)
        cmove PER_CPU_VAR(irq_stack_ptr),%rsp
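        /*
         * irq_count starts out at -1, so the incl above yields zero (and
         * sets ZF) only on the outermost entry; cmove then switches to the
         * per-cpu interrupt stack in exactly that case, so nested callers
         * keep whatever stack they are already on.
         */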
        push  %rbp                      # backlink for old unwinder
        call __do_softirq
        leaveq
        CFI_RESTORE             rbp
        CFI_DEF_CFA_REGISTER    rsp
        CFI_ADJUST_CFA_OFFSET   -8
        decl PER_CPU_VAR(irq_count)
        ret
        CFI_ENDPROC
END(call_softirq)

#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct pt_regs *)
        CFI_STARTPROC
/*
 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
 * see the correct pointer to the pt_regs
 */
        movq %rdi, %rsp            # we don't return, adjust the stack frame
        CFI_ENDPROC
        DEFAULT_FRAME
11:     incl PER_CPU_VAR(irq_count)
        movq %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
        cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
        pushq %rbp                      # backlink for old unwinder
        call xen_evtchn_do_upcall
        popq %rsp
        CFI_DEF_CFA_REGISTER rsp
        decl PER_CPU_VAR(irq_count)
        jmp  error_exit
        CFI_ENDPROC
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
        INTR_FRAME 1 (6*8)
        /*CFI_REL_OFFSET gs,GS*/
        /*CFI_REL_OFFSET fs,FS*/
        /*CFI_REL_OFFSET es,ES*/
        /*CFI_REL_OFFSET ds,DS*/
        CFI_REL_OFFSET r11,8
        CFI_REL_OFFSET rcx,0
        movw %ds,%cx
        cmpw %cx,0x10(%rsp)
        CFI_REMEMBER_STATE
        jne 1f
        movw %es,%cx
        cmpw %cx,0x18(%rsp)
        jne 1f
        movw %fs,%cx
        cmpw %cx,0x20(%rsp)
        jne 1f
        movw %gs,%cx
        cmpw %cx,0x28(%rsp)
        jne 1f
        /* All segments match their saved values => Category 2 (Bad IRET). */
        movq (%rsp),%rcx
        CFI_RESTORE rcx
        movq 8(%rsp),%r11
        CFI_RESTORE r11
        addq $0x30,%rsp
        CFI_ADJUST_CFA_OFFSET -0x30
        pushq_cfi $0    /* RIP */
        pushq_cfi %r11
        pushq_cfi %rcx
        jmp general_protection
        CFI_RESTORE_STATE
1:      /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
        movq (%rsp),%rcx
        CFI_RESTORE rcx
        movq 8(%rsp),%r11
        CFI_RESTORE r11
        addq $0x30,%rsp
        CFI_ADJUST_CFA_OFFSET -0x30
        pushq_cfi $0
        SAVE_ALL
        jmp error_exit
        CFI_ENDPROC
END(xen_failsafe_callback)

apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
        xen_hvm_callback_vector xen_evtchn_do_upcall

#endif /* CONFIG_XEN */

/*
 * Some functions should be protected against kprobes
 */
        .pushsection .kprobes.text, "ax"

paranoidzeroentry_ist debug do_debug DEBUG_STACK
paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
paranoiderrorentry stack_segment do_stack_segment
#ifdef CONFIG_XEN
zeroentry xen_debug do_debug
zeroentry xen_int3 do_int3
errorentry xen_stack_segment do_stack_segment
#endif
errorentry general_protection do_general_protection
errorentry page_fault do_page_fault
#ifdef CONFIG_KVM_GUEST
errorentry async_page_fault do_async_page_fault
#endif
#ifdef CONFIG_X86_MCE
paranoidzeroentry machine_check *machine_check_vector(%rip)
#endif

        /*
         * "Paranoid" exit path from exception stack.
         * Paranoid because this is used by NMIs and cannot take
         * any kernel state for granted.
         * We don't do kernel preemption checks here, because only
         * NMI should be common and it does not enable IRQs and
         * cannot get reschedule ticks.
         *
         * "trace" is 0 for the NMI handler only, because irq-tracing
         * is fundamentally NMI-unsafe. (we cannot change the soft and
         * hard flags at once, atomically)
         */

        /* ebx: no swapgs flag */
ENTRY(paranoid_exit)
        DEFAULT_FRAME
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        testl %ebx,%ebx                         /* swapgs needed? */
        jnz paranoid_restore
        testl $3,CS(%rsp)
        jnz   paranoid_userspace
paranoid_swapgs:
        TRACE_IRQS_IRETQ 0
        SWAPGS_UNSAFE_STACK
        RESTORE_ALL 8
        jmp irq_return
paranoid_restore:
        TRACE_IRQS_IRETQ 0
        RESTORE_ALL 8
        jmp irq_return
paranoid_userspace:
        GET_THREAD_INFO(%rcx)
        movl TI_flags(%rcx),%ebx
        andl $_TIF_WORK_MASK,%ebx
        jz paranoid_swapgs
        movq %rsp,%rdi                  /* &pt_regs */
        call sync_regs
        movq %rax,%rsp                  /* switch stack for scheduling */
        testl $_TIF_NEED_RESCHED,%ebx
        jnz paranoid_schedule
        movl %ebx,%edx                  /* arg3: thread flags */
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        xorl %esi,%esi                  /* arg2: oldset */
        movq %rsp,%rdi                  /* arg1: &pt_regs */
        call do_notify_resume
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp paranoid_userspace
paranoid_schedule:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_ANY)
        call schedule
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        jmp paranoid_userspace
        CFI_ENDPROC
END(paranoid_exit)

/*
 * Exception entry point. This expects an error code/orig_rax on the stack.
 * Returns the "no swapgs flag" in %ebx.
 */
ENTRY(error_entry)
        XCPT_FRAME
        CFI_ADJUST_CFA_OFFSET 15*8
        /* oldrax contains error code */
        cld
        movq_cfi rdi, RDI+8
        movq_cfi rsi, RSI+8
        movq_cfi rdx, RDX+8
        movq_cfi rcx, RCX+8
        movq_cfi rax, RAX+8
        movq_cfi  r8,  R8+8
        movq_cfi  r9,  R9+8
        movq_cfi r10, R10+8
        movq_cfi r11, R11+8
        movq_cfi rbx, RBX+8
        movq_cfi rbp, RBP+8
        movq_cfi r12, R12+8
        movq_cfi r13, R13+8
        movq_cfi r14, R14+8
        movq_cfi r15, R15+8
        xorl %ebx,%ebx
        testl $3,CS+8(%rsp)
        je error_kernelspace
error_swapgs:
        SWAPGS
error_sti:
        TRACE_IRQS_OFF
        ret

/*
 * There are two places in the kernel that can potentially fault with
 * usergs. Handle them here. The exception handlers after iret run with
 * kernel gs again, so don't set the user space flag. B stepping K8s
 * sometimes report a truncated RIP for IRET exceptions returning to
 * compat mode. Check for these here too.
 */
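/*
 * Concretely, the checks below catch: (1) a fault at irq_return or at
 * gs_change, both of which fault with the user GS already in place, so
 * a swapgs is needed before C code runs; and (2) the K8 B-stepping case,
 * where the reported RIP is only the low 32 bits of irq_return, hence
 * the compare against the zero-extended address and the RIP repair at
 * bstep_iret.
 */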
error_kernelspace:
        incl %ebx
        leaq irq_return(%rip),%rcx
        cmpq %rcx,RIP+8(%rsp)
        je error_swapgs
        movl %ecx,%eax  /* zero extend */
        cmpq %rax,RIP+8(%rsp)
        je bstep_iret
        cmpq $gs_change,RIP+8(%rsp)
        je error_swapgs
        jmp error_sti

bstep_iret:
        /* Fix truncated RIP */
        movq %rcx,RIP+8(%rsp)
        jmp error_swapgs
        CFI_ENDPROC
END(error_entry)


/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
ENTRY(error_exit)
        DEFAULT_FRAME
        movl %ebx,%eax
        RESTORE_REST
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        GET_THREAD_INFO(%rcx)
        testl %eax,%eax
        jne retint_kernel
        LOCKDEP_SYS_EXIT_IRQ
        movl TI_flags(%rcx),%edx
        movl $_TIF_WORK_MASK,%edi
        andl %edi,%edx
        jnz retint_careful
        jmp retint_swapgs
        CFI_ENDPROC
END(error_exit)


        /* runs on exception stack */
ENTRY(nmi)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call save_paranoid
        DEFAULT_FRAME 0
        /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
        movq %rsp,%rdi
        movq $-1,%rsi
        call do_nmi
#ifdef CONFIG_TRACE_IRQFLAGS
        /* paranoidexit; without TRACE_IRQS_OFF */
        /* ebx: no swapgs flag */
        DISABLE_INTERRUPTS(CLBR_NONE)
        testl %ebx,%ebx                         /* swapgs needed? */
        jnz nmi_restore
        testl $3,CS(%rsp)
        jnz nmi_userspace
nmi_swapgs:
        SWAPGS_UNSAFE_STACK
nmi_restore:
        RESTORE_ALL 8
        jmp irq_return
nmi_userspace:
        GET_THREAD_INFO(%rcx)
        movl TI_flags(%rcx),%ebx
        andl $_TIF_WORK_MASK,%ebx
        jz nmi_swapgs
        movq %rsp,%rdi                  /* &pt_regs */
        call sync_regs
        movq %rax,%rsp                  /* switch stack for scheduling */
        testl $_TIF_NEED_RESCHED,%ebx
        jnz nmi_schedule
        movl %ebx,%edx                  /* arg3: thread flags */
        ENABLE_INTERRUPTS(CLBR_NONE)
        xorl %esi,%esi                  /* arg2: oldset */
        movq %rsp,%rdi                  /* arg1: &pt_regs */
        call do_notify_resume
        DISABLE_INTERRUPTS(CLBR_NONE)
        jmp nmi_userspace
nmi_schedule:
        ENABLE_INTERRUPTS(CLBR_ANY)
        call schedule
        DISABLE_INTERRUPTS(CLBR_ANY)
        jmp nmi_userspace
        CFI_ENDPROC
#else
        jmp paranoid_exit
        CFI_ENDPROC
#endif
END(nmi)

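/*
 * ignore_sysret is never reached through the syscall table; it is
 * installed as the SYSCALL entry for modes the kernel does not service
 * (e.g. syscall_init() writes it into MSR_CSTAR when 32-bit emulation
 * is disabled), so a stray SYSCALL simply returns -ENOSYS. This
 * description of the wiring is from syscall_init() in kernels of this
 * era, not from this file.
 */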
ENTRY(ignore_sysret)
        CFI_STARTPROC
        mov $-ENOSYS,%eax
        sysret
        CFI_ENDPROC
END(ignore_sysret)

/*
 * End of kprobes section
 */
        .popsection
