
Linux/arch/x86/kernel/process.c

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
	.x86_tss = {
		.sp0 = TOP_OF_INIT_STACK,
#ifdef CONFIG_X86_32
		.ss0 = __KERNEL_DS,
		.ss1 = __KERNEL_CS,
		.io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
#endif
	},
#ifdef CONFIG_X86_32
	/*
	 * Note that the .io_bitmap member must be extra-big. This is because
	 * the CPU will access an additional byte beyond the end of the IO
	 * permission bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
#endif
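
/*
 * Illustrative sketch (not part of the original file): an x86-64 driver
 * could watch idle transitions through this chain. The callback name and
 * body below are hypothetical; only the register/unregister API above and
 * the IDLE_START/IDLE_END actions posted later in this file are real.
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long action, void *unused)
 *	{
 *		if (action == IDLE_START)
 *			pr_debug("cpu entering idle\n");
 *		else if (action == IDLE_END)
 *			pr_debug("cpu leaving idle\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_notify,
 *	};
 *
 *	idle_notifier_register(&my_idle_nb);
 */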

/*
 * This gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);

	return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;
	struct fpu *fpu = &t->fpu;

	if (bp) {
		struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	fpu__drop(fpu);
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__clear(&tsk->thread.fpu);
}

static void hard_disable_TSC(void)
{
	cr4_set_bits(X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	cr4_clear_bits(X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
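
/*
 * Userspace sketch (illustrative, not part of the original file): these
 * modes are driven through prctl(2), whose PR_GET_TSC/PR_SET_TSC cases
 * land in get_tsc_mode()/set_tsc_mode() above. Once PR_TSC_SIGSEGV is
 * set, executing RDTSC raises SIGSEGV in this task:
 *
 *	#include <sys/prctl.h>
 *
 *	int tsc_mode;
 *
 *	prctl(PR_GET_TSC, &tsc_mode);		query the current mode
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	RDTSC faults from now on
 *	prctl(PR_SET_TSC, PR_TSC_ENABLE);	RDTSC is allowed again
 */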
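
/*
 * Invoked from __switch_to() when the outgoing or the incoming task uses
 * one of the "extra" per-task features handled here: block stepping
 * (TIF_BLOCKSTEP), RDTSC restriction (TIF_NOTSC), an ioperm() IO bitmap
 * (TIF_IO_BITMAP) or user-return notifiers.
 */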
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}
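
/*
 * Note on the IO bitmap branch above: the per-task bitmap is created by
 * the ioperm(2) syscall. A userspace sketch (illustrative; the port is
 * the classic parallel-port example):
 *
 *	#include <sys/io.h>
 *
 *	ioperm(0x378, 3, 1);	grant access to ports 0x378..0x37a
 *	outb(0xff, 0x378);	port IO now allowed from user mode
 *
 * On every switch to such a task the used range of its bitmap is copied
 * into the per-CPU TSS, which the CPU consults whenever CPL exceeds IOPL.
 */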

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

#ifdef CONFIG_X86_64
void enter_idle(void)
{
	this_cpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
#endif

void arch_cpu_idle_enter(void)
{
	local_touch_nmi();
	enter_idle();
}

void arch_cpu_idle_exit(void)
{
	__exit_idle();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}

/*
 * We use this if we don't have any better idle routine.
 */
void default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();

	for (;;)
		halt();
}

bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
	if (amd_e400_c1e_mask != NULL)
		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local APIC timer and TSC stop).
 */
static void amd_e400_idle(void)
{
	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			pr_info("System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/* Force broadcast so ACPI cannot interfere. */
			tick_broadcast_force();
			pr_info("Switch to broadcast mode on CPU%d\n", cpu);
		}
		tick_broadcast_enter();

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		tick_broadcast_exit();
		local_irq_enable();
	} else
		default_idle();
}

/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT))
		return 0;

	return 1;
}

/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
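/*
 * On CPUs flagged with X86_BUG_CLFLUSH_MONITOR (cf. the Xeon 7400 AAI65
 * erratum), MONITOR may not arm reliably unless the monitored cache line
 * is flushed first; the CLFLUSH bracketed by fences below is that
 * workaround.
 */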
static void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			smp_mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			smp_mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
	}
	__current_clr_polling();
}

void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (cpu_has_bug(c, X86_BUG_AMD_APIC_C1E)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (x86_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the "idle=halt" boot option is used, HLT is
		 * forced for CPU idle and the C2/C3 states won't be
		 * used again. The cpuidle driver can still be loaded
		 * in this case.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * With the "idle=nomwait" boot option, MWAIT is
		 * disabled for the C2/C3 states. The cpuidle driver
		 * still loads and checks boot_option_idle_override
		 * itself.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
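
/*
 * Example (illustrative): the option is passed on the kernel command
 * line and parsed by idle_setup() above:
 *
 *	idle=poll	spin in the idle loop, never halt
 *	idle=halt	force HLT, cpuidle can still load
 *	idle=nomwait	don't use MWAIT for the C2/C3 states
 */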
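
/*
 * Randomize the initial stack pointer by up to 8 KiB, unless the task
 * opted out of VA randomization, then round down to a 16 byte boundary
 * as the ABI expects.
 */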
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
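
/*
 * Place the randomized heap break inside a 32 MiB (0x02000000 byte)
 * window above the unrandomized brk; fall back to the original brk if
 * randomize_range() returns 0.
 */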
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}
