Linux/arch/x86/include/asm/processor.h

#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>

/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN    0

#define HBP_NUM 4
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
        void *pc;

        asm volatile("mov $1f, %0; 1:":"=r" (pc));

        return pc;
}

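/*
 * Usage sketch (illustrative, not part of the original header; assumes
 * printk from <linux/printk.h> is available in this context):
 */
static inline void example_report_pc(void)
{
        printk(KERN_DEBUG "executing near %p\n", current_text_addr());
}
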
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN             (1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN        (1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN             16
# define ARCH_MIN_MMSTRUCT_ALIGN        0
#endif

enum tlb_infos {
        ENTRIES,
        NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];
extern s8  __read_mostly tlb_flushall_shift;

/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head.S, so think twice
 *  before touching them. [mj]
 */

struct cpuinfo_x86 {
        __u8                    x86;            /* CPU family */
        __u8                    x86_vendor;     /* CPU vendor */
        __u8                    x86_model;
        __u8                    x86_mask;
#ifdef CONFIG_X86_32
        char                    wp_works_ok;    /* It doesn't on 386's */

        /* Problems on some 486Dx4's and old 386's: */
        char                    rfu;
        char                    pad0;
        char                    pad1;
#else
        /* Number of 4K pages in DTLB/ITLB combined: */
        int                     x86_tlbsize;
#endif
        __u8                    x86_virt_bits;
        __u8                    x86_phys_bits;
        /* CPUID returned core id bits: */
        __u8                    x86_coreid_bits;
        /* Max extended CPUID function supported: */
        __u32                   extended_cpuid_level;
        /* Maximum supported CPUID level, -1=no CPUID: */
        int                     cpuid_level;
        __u32                   x86_capability[NCAPINTS + NBUGINTS];
        char                    x86_vendor_id[16];
        char                    x86_model_id[64];
        /* in KB - valid for CPUs which support this call: */
        int                     x86_cache_size;
        int                     x86_cache_alignment;    /* In bytes */
        int                     x86_power;
        unsigned long           loops_per_jiffy;
        /* cpuid returned max cores value: */
        u16                     x86_max_cores;
        u16                     apicid;
        u16                     initial_apicid;
        u16                     x86_clflush_size;
        /* number of cores as seen by the OS: */
        u16                     booted_cores;
        /* Physical processor id: */
        u16                     phys_proc_id;
        /* Core id: */
        u16                     cpu_core_id;
        /* Compute unit id */
        u8                      compute_unit_id;
        /* Index into per_cpu list: */
        u16                     cpu_index;
        u32                     microcode;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL        0
#define X86_VENDOR_CYRIX        1
#define X86_VENDOR_AMD          2
#define X86_VENDOR_UMC          3
#define X86_VENDOR_CENTAUR      5
#define X86_VENDOR_TRANSMETA    7
#define X86_VENDOR_NSC          8
#define X86_VENDOR_NUM          9

#define X86_VENDOR_UNKNOWN      0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86       boot_cpu_data;
extern struct cpuinfo_x86       new_cpu_data;

extern struct tss_struct        doublefault_tss;
extern __u32                    cpu_caps_cleared[NCAPINTS];
extern __u32                    cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)           per_cpu(cpu_info, cpu)
#else
#define cpu_info                boot_cpu_data
#define cpu_data(cpu)           boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()       (boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);
extern void fpu_detect(struct cpuinfo_x86 *c);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
        return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
                                unsigned int *ecx, unsigned int *edx)
{
        /* ecx is often an input as well as an output. */
        asm volatile("cpuid"
            : "=a" (*eax),
              "=b" (*ebx),
              "=c" (*ecx),
              "=d" (*edx)
            : "0" (*eax), "2" (*ecx)
            : "memory");
}

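/*
 * Usage sketch (illustrative, not part of the original header; assumes
 * memcpy from <linux/string.h>): leaf 0 returns the vendor string in
 * EBX:EDX:ECX order.
 */
static inline void example_cpu_vendor(char vendor[13])
{
        unsigned int eax = 0, ebx, ecx, edx;

        native_cpuid(&eax, &ebx, &ecx, &edx);
        memcpy(vendor + 0, &ebx, 4);    /* "Genu", "Auth", ... */
        memcpy(vendor + 4, &edx, 4);    /* "ineI", "enti", ... */
        memcpy(vendor + 8, &ecx, 4);    /* "ntel", "cAMD", ... */
        vendor[12] = '\0';
}
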
static inline void load_cr3(pgd_t *pgdir)
{
        write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
        unsigned short          back_link, __blh;
        unsigned long           sp0;
        unsigned short          ss0, __ss0h;
        unsigned long           sp1;
        /* ss1 caches MSR_IA32_SYSENTER_CS: */
        unsigned short          ss1, __ss1h;
        unsigned long           sp2;
        unsigned short          ss2, __ss2h;
        unsigned long           __cr3;
        unsigned long           ip;
        unsigned long           flags;
        unsigned long           ax;
        unsigned long           cx;
        unsigned long           dx;
        unsigned long           bx;
        unsigned long           sp;
        unsigned long           bp;
        unsigned long           si;
        unsigned long           di;
        unsigned short          es, __esh;
        unsigned short          cs, __csh;
        unsigned short          ss, __ssh;
        unsigned short          ds, __dsh;
        unsigned short          fs, __fsh;
        unsigned short          gs, __gsh;
        unsigned short          ldt, __ldth;
        unsigned short          trace;
        unsigned short          io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
        u32                     reserved1;
        u64                     sp0;
        u64                     sp1;
        u64                     sp2;
        u64                     reserved2;
        u64                     ist[7];
        u32                     reserved3;
        u32                     reserved4;
        u16                     reserved5;
        u16                     io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS                  65536
#define IO_BITMAP_BYTES                 (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS                 (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET                offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET        0x8000

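/*
 * Worked sizes (illustrative note, not in the original header):
 * 65536 bits / 8 = 8192 bytes; divided by sizeof(long) that is
 * 1024 longs on 64-bit and 2048 longs on 32-bit -- one bit per
 * I/O port across the full 64K port space.
 */
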
struct tss_struct {
        /*
         * The hardware state:
         */
        struct x86_hw_tss       x86_tss;

        /*
         * The extra 1 is there because the CPU will access an
         * additional byte beyond the end of the IO permission
         * bitmap. The extra byte must be all 1 bits, and must
         * be within the limit.
         */
        unsigned long           io_bitmap[IO_BITMAP_LONGS + 1];

        /*
         * .. and then another 64 longs (0x100 bytes on 32-bit) for the
         * emergency kernel stack:
         */
        unsigned long           stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
        unsigned long           ist[7];
};

#define MXCSR_DEFAULT           0x1f80

struct i387_fsave_struct {
        u32                     cwd;    /* FPU Control Word             */
        u32                     swd;    /* FPU Status Word              */
        u32                     twd;    /* FPU Tag Word                 */
        u32                     fip;    /* FPU IP Offset                */
        u32                     fcs;    /* FPU IP Selector              */
        u32                     foo;    /* FPU Operand Pointer Offset   */
        u32                     fos;    /* FPU Operand Pointer Selector */

        /* 8*10 bytes for each FP-reg = 80 bytes:                       */
        u32                     st_space[20];

        /* Software status information [not touched by FSAVE]:          */
        u32                     status;
};

struct i387_fxsave_struct {
        u16                     cwd; /* Control Word                    */
        u16                     swd; /* Status Word                     */
        u16                     twd; /* Tag Word                        */
        u16                     fop; /* Last Instruction Opcode         */
        union {
                struct {
                        u64     rip; /* Instruction Pointer             */
                        u64     rdp; /* Data Pointer                    */
                };
                struct {
                        u32     fip; /* FPU IP Offset                   */
                        u32     fcs; /* FPU IP Selector                 */
                        u32     foo; /* FPU Operand Offset              */
                        u32     fos; /* FPU Operand Selector            */
                };
        };
        u32                     mxcsr;          /* MXCSR Register State */
        u32                     mxcsr_mask;     /* MXCSR Mask           */

        /* 8*16 bytes for each FP-reg = 128 bytes:                      */
        u32                     st_space[32];

        /* 16*16 bytes for each XMM-reg = 256 bytes:                    */
        u32                     xmm_space[64];

        u32                     padding[12];

        union {
                u32             padding1[12];
                u32             sw_reserved[12];
        };

} __attribute__((aligned(16)));

struct i387_soft_struct {
        u32                     cwd;
        u32                     swd;
        u32                     twd;
        u32                     fip;
        u32                     fcs;
        u32                     foo;
        u32                     fos;
        /* 8*10 bytes for each FP-reg = 80 bytes: */
        u32                     st_space[20];
        u8                      ftop;
        u8                      changed;
        u8                      lookahead;
        u8                      no_update;
        u8                      rm;
        u8                      alimit;
        struct math_emu_info    *info;
        u32                     entry_eip;
};

struct ymmh_struct {
        /* 16 * 16 bytes for each YMMH-reg = 256 bytes */
        u32 ymmh_space[64];
};

/* We don't support LWP yet: */
struct lwp_struct {
        u8 reserved[128];
};

struct bndregs_struct {
        u64 bndregs[8];
} __packed;

struct bndcsr_struct {
        u64 cfg_reg_u;
        u64 status_reg;
} __packed;

struct xsave_hdr_struct {
        u64 xstate_bv;
        u64 reserved1[2];
        u64 reserved2[5];
} __attribute__((packed));

struct xsave_struct {
        struct i387_fxsave_struct i387;
        struct xsave_hdr_struct xsave_hdr;
        struct ymmh_struct ymmh;
        struct lwp_struct lwp;
        struct bndregs_struct bndregs;
        struct bndcsr_struct bndcsr;
        /* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));

union thread_xstate {
        struct i387_fsave_struct        fsave;
        struct i387_fxsave_struct       fxsave;
        struct i387_soft_struct         soft;
        struct xsave_struct             xsave;
};

struct fpu {
        unsigned int last_cpu;
        unsigned int has_fpu;
        union thread_xstate *state;
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
        char irq_stack[IRQ_STACK_SIZE];
        /*
         * GCC hardcodes the stack canary as %gs:40.  Since the
         * irq_stack is the object at %gs:0, we reserve the bottom
         * 48 bytes of the irq stack for the canary.
         */
        struct {
                char gs_base[40];
                unsigned long stack_canary;
        };
};

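/*
 * Illustrative compile-time check (a sketch, not in the original
 * header): from inside a function, the 40-byte offset that the
 * comment above relies on could be asserted with
 *
 *      BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
 */
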
DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);
#else   /* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure the stack canary segment base is cache-line aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
        char __pad[20];         /* canary at %gs:20 */
        unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
#endif  /* X86_64 */

extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;

struct perf_event;

struct thread_struct {
        /* Cached TLS descriptors: */
        struct desc_struct      tls_array[GDT_ENTRY_TLS_ENTRIES];
        unsigned long           sp0;
        unsigned long           sp;
#ifdef CONFIG_X86_32
        unsigned long           sysenter_cs;
#else
        unsigned long           usersp; /* Copy from PDA */
        unsigned short          es;
        unsigned short          ds;
        unsigned short          fsindex;
        unsigned short          gsindex;
#endif
#ifdef CONFIG_X86_32
        unsigned long           ip;
#endif
#ifdef CONFIG_X86_64
        unsigned long           fs;
#endif
        unsigned long           gs;
        /* Save middle states of ptrace breakpoints */
        struct perf_event       *ptrace_bps[HBP_NUM];
        /* Debug status used for traps, single steps, etc... */
        unsigned long           debugreg6;
        /* Keep track of the exact dr7 value set by the user */
        unsigned long           ptrace_dr7;
        /* Fault info: */
        unsigned long           cr2;
        unsigned long           trap_nr;
        unsigned long           error_code;
        /* floating point and extended processor state */
        struct fpu              fpu;
#ifdef CONFIG_X86_32
        /* Virtual 86 mode info */
        struct vm86_struct __user *vm86_info;
        unsigned long           screen_bitmap;
        unsigned long           v86flags;
        unsigned long           v86mask;
        unsigned long           saved_sp0;
        unsigned int            saved_fs;
        unsigned int            saved_gs;
#endif
        /* IO permissions: */
        unsigned long           *io_bitmap_ptr;
        unsigned long           iopl;
        /* Max allowed port in the bitmap, in bytes: */
        unsigned                io_bitmap_max;
        /*
         * fpu_counter contains the number of consecutive context switches
         * during which the FPU is used. If this passes a threshold, the
         * lazy FPU saving becomes unlazy to save the trap. This is an
         * unsigned char so that after 256 iterations the counter wraps
         * and the behavior turns lazy again; this deals with bursty apps
         * that only use the FPU for a short time.
         */
        unsigned char fpu_counter;
};

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
        unsigned int reg;

        asm volatile ("pushfl;"
                      "popl %0;"
                      "andl %1, %0;"
                      "orl %2, %0;"
                      "pushl %0;"
                      "popfl"
                      : "=&r" (reg)
                      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

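/*
 * Background note (illustrative, not in the original header):
 * X86_EFLAGS_IOPL is the two-bit I/O privilege field at bits 12-13
 * of EFLAGS (mask 0x3000), so passing X86_EFLAGS_IOPL as the mask
 * raises IOPL to 3 and passing 0 drops it back to ring-0-only I/O.
 */
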
static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
        tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
        /* Only happens when SEP is enabled, no need to test "SEP"arately: */
        if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
                tss->x86_tss.ss1 = thread->sysenter_cs;
                wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
        }
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
        asm volatile("swapgs" ::: "memory");
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid                 native_cpuid
#define paravirt_enabled()      0

static inline void load_sp0(struct tss_struct *tss,
                            struct thread_struct *thread)
{
        native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us
 * can get the correct flags.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
        unsigned long cr4;

        mmu_cr4_features |= mask;
        if (trampoline_cr4_features)
                *trampoline_cr4_features = mmu_cr4_features;
        cr4 = read_cr4();
        cr4 |= mask;
        write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
        unsigned long cr4;

        mmu_cr4_features &= ~mask;
        if (trampoline_cr4_features)
                *trampoline_cr4_features = mmu_cr4_features;
        cr4 = read_cr4();
        cr4 &= ~mask;
        write_cr4(cr4);
}

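/*
 * Usage sketch (illustrative, not part of the original header):
 * enabling global pages would look like the call below; the helper
 * also keeps mmu_cr4_features and the trampoline copy in sync so
 * CPUs booting later inherit the bit.
 */
static inline void example_enable_global_pages(void)
{
        set_in_cr4(X86_CR4_PGE);
}
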
typedef struct {
        unsigned long           seg;
} mm_segment_t;


/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function.
 * Clear %ecx, since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
                         unsigned int *eax, unsigned int *ebx,
                         unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = 0;
        __cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
                               unsigned int *eax, unsigned int *ebx,
                               unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = count;
        __cpuid(eax, ebx, ecx, edx);
}

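/*
 * Usage sketch (illustrative, not part of the original header): on
 * Intel CPUs, leaf 4 describes one cache per 'count' value until the
 * cache-type field in EAX[4:0] reads 0.
 */
static inline int example_count_cache_levels(void)
{
        unsigned int eax, ebx, ecx, edx;
        int index;

        for (index = 0; index < 16; index++) {
                cpuid_count(4, index, &eax, &ebx, &ecx, &edx);
                if ((eax & 0x1f) == 0)
                        break;
        }
        return index;
}
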
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return edx;
}

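/*
 * Usage sketch (illustrative, not part of the original header): the
 * single-datum helpers make feature probing terse, e.g. CPUID leaf 1
 * advertises SSE in EDX bit 25.
 */
static inline int example_has_sse(void)
{
        return (cpuid_edx(1) >> 25) & 1;
}
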
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
        asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
        rep_nop();
}

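/*
 * Usage sketch (illustrative, not part of the original header; 'flag'
 * is a hypothetical variable set by another CPU): a polite busy-wait
 * keeps PAUSE in the loop body.
 */
static inline void example_wait_on(volatile int *flag)
{
        while (!*flag)
                cpu_relax();
}
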
/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
        int tmp;

#ifdef CONFIG_M486
        /*
         * Do a CPUID if available, otherwise do a jump.  The jump
         * can conveniently enough be the jump around CPUID.
         */
        asm volatile("cmpl %2,%1\n\t"
                     "jl 1f\n\t"
                     "cpuid\n"
                     "1:"
                     : "=a" (tmp)
                     : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
                     : "ebx", "ecx", "edx", "memory");
#else
        /*
         * CPUID is a barrier to speculative execution.
         * Prefetched instructions are automatically
         * invalidated when modified.
         */
        asm volatile("cpuid"
                     : "=a" (tmp)
                     : "0" (1)
                     : "ebx", "ecx", "edx", "memory");
#endif
}

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);

extern unsigned long            boot_option_idle_override;
extern bool                     amd_e400_c1e_detected;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
                         IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);
void early_trap_pf_init(void);

/* Defined in head.S */
extern struct desc_ptr          early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
        unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
        if (boot_cpu_data.x86 < 6)
                return 0;
#endif
        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

        return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
        if (boot_cpu_data.x86 < 6)
                return;
#endif
        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

extern void set_task_blockstep(struct task_struct *task, bool on);

/*
 * from system description table in BIOS. Mostly for MCA use, but
 * others may find it useful:
 */
extern unsigned int             machine_id;
extern unsigned int             machine_submodel_id;
extern unsigned int             BIOS_revision;

/* Boot loader type from the setup header: */
extern int                      bootloader_type;
extern int                      bootloader_version;

extern char                     ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH          ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH          "prefetcht0 (%1)"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6,
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
        alternative_input(BASE_PREFETCH,
                          "prefetchnta (%1)",
                          X86_FEATURE_XMM,
                          "r" (x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
        alternative_input(BASE_PREFETCH,
                          "prefetchw (%1)",
                          X86_FEATURE_3DNOW,
                          "r" (x));
}

static inline void spin_lock_prefetch(const void *x)
{
        prefetchw(x);
}

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE               PAGE_OFFSET
#define TASK_SIZE_MAX           TASK_SIZE
#define STACK_TOP               TASK_SIZE
#define STACK_TOP_MAX           STACK_TOP

#define INIT_THREAD  {                                                    \
        .sp0                    = sizeof(init_stack) + (long)&init_stack, \
        .vm86_info              = NULL,                                   \
        .sysenter_cs            = __KERNEL_CS,                            \
        .io_bitmap_ptr          = NULL,                                   \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {                                                       \
        .x86_tss = {                                                      \
                .sp0            = sizeof(init_stack) + (long)&init_stack, \
                .ss0            = __KERNEL_DS,                            \
                .ss1            = __KERNEL_CS,                            \
                .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,               \
         },                                                               \
        .io_bitmap              = { [0 ... IO_BITMAP_LONGS] = ~0 },       \
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)                                                 \
({                                                                     \
       unsigned long *__ptr = (unsigned long *)(info);                 \
       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (the interrupt gate does not save these registers
 * when switching to the same privilege ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain
 * completely wrong values.
 */
#define task_pt_regs(task)                                             \
({                                                                     \
       struct pt_regs *__regs__;                                       \
       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
       __regs__ - 1;                                                   \
})

#define KSTK_ESP(task)          (task_pt_regs(task)->sp)

#else
/*
 * User space process size: 47 bits minus one guard page.
 */
#define TASK_SIZE_MAX   ((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET        ((current->personality & ADDR_LIMIT_3GB) ? \
                                        0xc0000000 : 0xFFFFe000)

#define TASK_SIZE               (test_thread_flag(TIF_ADDR32) ? \
                                        IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)     ((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
                                        IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP               TASK_SIZE
#define STACK_TOP_MAX           TASK_SIZE_MAX

#define INIT_THREAD  { \
        .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
        .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/*
 * Return the saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)      (*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)       ((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);

/*
 * User space RSP while inside the SYSCALL fast path
 */
DECLARE_PER_CPU(unsigned long, old_rsp);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
                                               unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE      (PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)          (task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)        get_tsc_mode((adr))
#define SET_TSC_CTL(val)        set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

extern u16 amd_get_nb_id(int cpu);

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
        uint32_t base, eax, signature[3];

        for (base = 0x40000000; base < 0x40010000; base += 0x100) {
                cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

                if (!memcmp(sig, signature, 12) &&
                    (leaves == 0 || ((eax - base) >= leaves)))
                        return base;
        }

        return 0;
}

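/*
 * Usage sketch (illustrative, not part of the original header): KVM
 * advertises the 12-byte signature "KVMKVMKVM\0\0\0" somewhere in the
 * 0x40000000 leaf range.
 */
static inline bool example_running_on_kvm(void)
{
        return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0) != 0;
}
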
extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
#ifdef  CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
#endif /* _ASM_X86_PROCESSOR_H */