Linux/arch/x86/kernel/process_64.c

/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *      Andi Kleen.
 *
 *      CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>

asmlinkage extern void ret_from_fork(void);

__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;

        printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
        printk_address(regs->ip);
        printk(KERN_DEFAULT "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
                        regs->sp, regs->flags);
        printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->ax, regs->bx, regs->cx);
        printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->dx, regs->si, regs->di);
        printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->bp, regs->r8, regs->r9);
        printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        if (!all)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = __read_cr4();

        printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs, fsindex, gs, gsindex, shadowgs);
        printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
                        es, cr0);
        printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
                        cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        get_debugreg(d6, 6);
        get_debugreg(d7, 7);

        /* Only print out debug registers if they are in their non-default state. */
        if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
            (d6 == DR6_RESERVED) && (d7 == 0x400))
                return;

        printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
        printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);

}

void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
                if (dead_task->mm->context.ldt) {
                        pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
                                dead_task->comm,
                                dead_task->mm->context.ldt->entries,
                                dead_task->mm->context.ldt->size);
                        BUG();
                }
#endif
        }
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
        struct user_desc ud = {
                .base_addr = addr,
                .limit = 0xfffff,
                .seg_32bit = 1,
                .limit_in_pages = 1,
                .useable = 1,
        };
        struct desc_struct *desc = t->thread.tls_array;
        desc += tls;
        fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
        return get_desc_base(&t->thread.tls_array[tls]);
}

int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p, unsigned long tls)
{
        int err;
        struct pt_regs *childregs;
        struct task_struct *me = current;

        p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
        childregs = task_pt_regs(p);
        p->thread.sp = (unsigned long) childregs;
        set_tsk_thread_flag(p, TIF_FORK);
        p->thread.io_bitmap_ptr = NULL;

        savesegment(gs, p->thread.gsindex);
        p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
        savesegment(fs, p->thread.fsindex);
        p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->sp = (unsigned long)childregs;
                childregs->ss = __KERNEL_DS;
                childregs->bx = sp; /* function */
                childregs->bp = arg;
                childregs->orig_ax = -1;
                childregs->cs = __KERNEL_CS | get_kernel_rpl();
                childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
                return 0;
        }
        *childregs = *current_pt_regs();

        childregs->ax = 0;
        if (sp)
                childregs->sp = sp;

        err = -ENOMEM;
        if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
                                                  IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (is_ia32_task())
                        err = do_set_thread_area(p, -1,
                                (struct user_desc __user *)tls, 0);
                else
#endif
                        err = do_arch_prctl(p, ARCH_SET_FS, tls);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }

        return err;
}
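
The kmemdup() of io_bitmap_ptr above is what makes I/O port permissions heritable: a parent that gained ports through ioperm(2) has TIF_IO_BITMAP set, so each child created here receives its own copy of the bitmap. A small sketch of the user-visible effect (the port range 0x378 is an arbitrary example; the program needs CAP_SYS_RAWIO and fails gracefully without it):

#include <stdio.h>
#include <unistd.h>
#include <sys/io.h>

int main(void)
{
        /* Ask for access to three ports; the kernel allocates an I/O
         * bitmap for this task and sets TIF_IO_BITMAP. */
        if (ioperm(0x378, 3, 1) != 0) {
                perror("ioperm (needs CAP_SYS_RAWIO)");
                return 1;
        }

        /* After fork(), copy_thread_tls() above kmemdup()s the parent's
         * bitmap, so the child keeps the same port permissions. */
        if (fork() == 0) {
                (void)inb(0x378);       /* child may touch the port too */
                _exit(0);
        }
        return 0;
}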

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
                    unsigned long new_sp,
                    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
        loadsegment(fs, 0);
        loadsegment(es, _ds);
        loadsegment(ds, _ds);
        load_gs_index(0);
        regs->ip                = new_ip;
        regs->sp                = new_sp;
        regs->cs                = _cs;
        regs->ss                = _ss;
        regs->flags             = X86_EFLAGS_IF;
        force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            test_thread_flag(TIF_X32)
                            ? __USER_CS : __USER32_CS,
                            __USER_DS, __USER_DS);
}
#endif

/*
 *      switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported here either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread;
        struct thread_struct *next = &next_p->thread;
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
        unsigned fsindex, gsindex;
        fpu_switch_t fpu_switch;

        fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);

        /* We must save %fs and %gs before load_TLS() because
         * %fs and %gs may be cleared by load_TLS().
         *
         * (e.g. xen_load_tls())
         */
        savesegment(fs, fsindex);
        savesegment(gs, gsindex);

        /*
         * Load TLS before restoring any segments so that segment loads
         * reference the correct GDT entries.
         */
        load_TLS(next, cpu);

        /*
         * Leave lazy mode, flushing any hypercalls made here.  This
         * must be done after loading TLS entries in the GDT but before
         * loading segments that might reference them, and it must
         * be done before fpu__restore(), so the TS bit is up to
         * date.
         */
        arch_end_context_switch(next_p);

        /* Switch DS and ES.
         *
         * Reading them only returns the selectors, but writing them (if
         * nonzero) loads the full descriptor from the GDT or LDT.  The
         * LDT for next is loaded in switch_mm, and the GDT is loaded
         * above.
         *
         * We therefore need to write new values to the segment
         * registers on every context switch unless both the new and old
         * values are zero.
         *
         * Note that we don't need to do anything for CS and SS, as
         * those are saved and restored as part of pt_regs.
         */
        savesegment(es, prev->es);
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        savesegment(ds, prev->ds);
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        /*
         * Switch FS and GS.
         *
         * These are even more complicated than DS and ES: they have
         * 64-bit bases that are controlled by arch_prctl.  Those bases
         * only differ from the values in the GDT or LDT if the selector
         * is 0.
         *
         * Loading the segment register resets the hidden base part of
         * the register to 0 or the value from the GDT / LDT.  If the
         * next base address is zero, writing 0 to the segment register is
         * much faster than using wrmsr to explicitly zero the base.
         *
         * The thread_struct.fs and thread_struct.gs values are 0
         * if the fs and gs bases respectively are not overridden
         * from the values implied by fsindex and gsindex.  They
         * are nonzero, and store the nonzero base addresses, if
         * the bases are overridden.
         *
         * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) should
         * be impossible.
         *
         * Therefore we need to reload the segment registers if either
         * the old or new selector is nonzero, and we need to override
         * the base address if next thread expects it to be overridden.
         *
         * This code is unnecessarily slow in the case where the old and
         * new indexes are zero and the new base is nonzero -- it will
         * unnecessarily write 0 to the selector before writing the new
         * base address.
         *
         * Note: This all depends on arch_prctl being the only way that
         * user code can override the segment base.  Once wrfsbase and
         * wrgsbase are enabled, most of this code will need to change.
         */
        if (unlikely(fsindex | next->fsindex | prev->fs)) {
                loadsegment(fs, next->fsindex);

                /*
                 * If user code wrote a nonzero value to FS, then it also
                 * cleared the overridden base address.
                 *
                 * XXX: if user code wrote 0 to FS and cleared the base
                 * address itself, we won't notice and we'll incorrectly
                 * restore the prior base address the next time we reschedule
                 * the process.
                 */
                if (fsindex)
                        prev->fs = 0;
        }
        if (next->fs)
                wrmsrl(MSR_FS_BASE, next->fs);
        prev->fsindex = fsindex;

        if (unlikely(gsindex | next->gsindex | prev->gs)) {
                load_gs_index(next->gsindex);

                /* This works (and fails) the same way as fsindex above. */
                if (gsindex)
                        prev->gs = 0;
        }
        if (next->gs)
                wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
        prev->gsindex = gsindex;

        switch_fpu_finish(next_fpu, fpu_switch);

        /*
         * Switch the PDA and FPU contexts.
         */
        this_cpu_write(current_task, next_p);

        /* Reload esp0 and ss1.  This changes current_thread_info(). */
        load_sp0(tss, next);

        /*
         * Now maybe reload the debug registers and handle I/O bitmaps
         */
        if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
                     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
                __switch_to_xtra(prev_p, next_p, tss);

        if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
                /*
                 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
                 * does not update the cached descriptor.  As a result, if we
                 * do SYSRET while SS is NULL, we'll end up in user mode with
                 * SS apparently equal to __USER_DS but actually unusable.
                 *
                 * The straightforward workaround would be to fix it up just
                 * before SYSRET, but that would slow down the system call
                 * fast paths.  Instead, we ensure that SS is never NULL in
                 * system call context.  We do this by replacing NULL SS
                 * selectors at every context switch.  SYSCALL sets up a valid
                 * SS, so the only way to get NULL is to re-enter the kernel
                 * from CPL 3 through an interrupt.  Since that can't happen
                 * in the same task as a running syscall, we are guaranteed to
                 * context switch between every interrupt vector entry and a
                 * subsequent SYSRET.
                 *
                 * We read SS first because SS reads are much faster than
                 * writes.  Out of caution, we force SS to __KERNEL_DS even if
                 * it previously had a different non-NULL value.
                 */
                unsigned short ss_sel;
                savesegment(ss, ss_sel);
                if (ss_sel != __KERNEL_DS)
                        loadsegment(ss, __KERNEL_DS);
        }

        return prev_p;
}

void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);
        clear_thread_flag(TIF_ADDR32);
        clear_thread_flag(TIF_X32);

        /* Ensure the corresponding mm is not marked. */
        if (current->mm)
                current->mm->context.ia32_compat = 0;

        /* TBD: overwrites user setup. Should have two bits.
           But 64bit processes have always behaved this way,
           so it's not too bad. The main problem is just that
           32bit children are affected again. */
        current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(bool x32)
{
        /* inherit personality from parent */

        /* Make sure to be in 32bit mode */
        set_thread_flag(TIF_ADDR32);

        /* Mark the associated mm as containing 32-bit tasks. */
        if (x32) {
                clear_thread_flag(TIF_IA32);
                set_thread_flag(TIF_X32);
                if (current->mm)
                        current->mm->context.ia32_compat = TIF_X32;
                current->personality &= ~READ_IMPLIES_EXEC;
                /* is_compat_task() uses the presence of the x32
                   syscall bit flag to determine compat status */
                current_thread_info()->status &= ~TS_COMPAT;
        } else {
                set_thread_flag(TIF_IA32);
                clear_thread_flag(TIF_X32);
                if (current->mm)
                        current->mm->context.ia32_compat = TIF_IA32;
                current->personality |= force_personality32;
                /* Prepare the first "return" to user space */
                current_thread_info()->status |= TS_COMPAT;
        }
}
EXPORT_SYMBOL_GPL(set_personality_ia32);
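
The READ_IMPLIES_EXEC manipulation in both set_personality functions is observable from user space through the personality(2) syscall. A brief sketch that queries the current persona (0xffffffff is the conventional query-only argument):

#include <stdio.h>
#include <sys/personality.h>

int main(void)
{
        /* 0xffffffff retrieves the current persona without changing it. */
        unsigned long persona = personality(0xffffffff);

        printf("READ_IMPLIES_EXEC is %s\n",
               (persona & READ_IMPLIES_EXEC) ? "set" : "clear");

        /* Setting the flag here would affect later execve()s; for a
         * fresh 64-bit image, set_personality_64bit() above clears it. */
        return 0;
}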

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (code) {
        case ARCH_SET_GS:
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, GS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                load_gs_index(GS_TLS_SEL);
                        }
                        task->thread.gsindex = GS_TLS_SEL;
                        task->thread.gs = 0;
                } else {
                        task->thread.gsindex = 0;
                        task->thread.gs = addr;
                        if (doit) {
                                load_gs_index(0);
                                ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /* Not strictly needed for fs, but do it for symmetry
                   with gs */
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, FS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                loadsegment(fs, FS_TLS_SEL);
                        }
                        task->thread.fsindex = FS_TLS_SEL;
                        task->thread.fs = 0;
                } else {
                        task->thread.fsindex = 0;
                        task->thread.fs = addr;
                        if (doit) {
                                /* set the selector to 0 to not confuse
                                   __switch_to */
                                loadsegment(fs, 0);
                                ret = wrmsrl_safe(MSR_FS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;
                if (task->thread.fsindex == FS_TLS_SEL)
                        base = read_32bit_tls(task, FS_TLS);
                else if (doit)
                        rdmsrl(MSR_FS_BASE, base);
                else
                        base = task->thread.fs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;
                unsigned gsindex;
                if (task->thread.gsindex == GS_TLS_SEL)
                        base = read_32bit_tls(task, GS_TLS);
                else if (doit) {
                        savesegment(gs, gsindex);
                        if (gsindex)
                                rdmsrl(MSR_KERNEL_GS_BASE, base);
                        else
                                base = task->thread.gs;
                } else
                        base = task->thread.gs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
        return do_arch_prctl(current, code, addr);
}
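
sys_arch_prctl() is the entry point for the arch_prctl(2) syscall. glibc has historically provided no wrapper, so user code reaches it via syscall(2). A minimal sketch exercising the GET/SET paths above, assuming the ARCH_* constants from asm/prctl.h (a base below 4GB takes the GDT path in do_arch_prctl(), while a higher base takes the MSR path):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>          /* ARCH_SET_GS, ARCH_GET_FS, ... */

int main(void)
{
        unsigned long fsbase = 0, gsbase = 0;
        void *area = malloc(64);

        /* ARCH_GET_FS stores the base through the addr argument. */
        if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase) != 0)
                perror("ARCH_GET_FS");
        printf("FS base: %#lx\n", fsbase);

        /* GS is unused by 64-bit glibc, so repointing it is harmless. */
        if (syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)area) != 0)
                perror("ARCH_SET_GS");

        if (syscall(SYS_arch_prctl, ARCH_GET_GS, &gsbase) == 0)
                printf("GS base: %#lx (area at %p)\n", gsbase, area);

        free(area);
        return 0;
}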

unsigned long KSTK_ESP(struct task_struct *task)
{
        return task_pt_regs(task)->sp;
}
