Version:  2.0.40 2.2.26 2.4.37 3.13 3.14 3.15 3.16 3.17 3.18 3.19 4.0 4.1 4.2 4.3 4.4 4.5 4.6 4.7 4.8 4.9 4.10

Linux/include/linux/sched.h

  1 #ifndef _LINUX_SCHED_H
  2 #define _LINUX_SCHED_H
  3 
  4 #include <uapi/linux/sched.h>
  5 
  6 #include <linux/sched/prio.h>
  7 
  8 
  9 struct sched_param {
 10         int sched_priority;
 11 };
 12 
 13 #include <asm/param.h>  /* for HZ */
 14 
 15 #include <linux/capability.h>
 16 #include <linux/threads.h>
 17 #include <linux/kernel.h>
 18 #include <linux/types.h>
 19 #include <linux/timex.h>
 20 #include <linux/jiffies.h>
 21 #include <linux/plist.h>
 22 #include <linux/rbtree.h>
 23 #include <linux/thread_info.h>
 24 #include <linux/cpumask.h>
 25 #include <linux/errno.h>
 26 #include <linux/nodemask.h>
 27 #include <linux/mm_types.h>
 28 #include <linux/preempt_mask.h>
 29 
 30 #include <asm/page.h>
 31 #include <asm/ptrace.h>
 32 #include <linux/cputime.h>
 33 
 34 #include <linux/smp.h>
 35 #include <linux/sem.h>
 36 #include <linux/shm.h>
 37 #include <linux/signal.h>
 38 #include <linux/compiler.h>
 39 #include <linux/completion.h>
 40 #include <linux/pid.h>
 41 #include <linux/percpu.h>
 42 #include <linux/topology.h>
 43 #include <linux/proportions.h>
 44 #include <linux/seccomp.h>
 45 #include <linux/rcupdate.h>
 46 #include <linux/rculist.h>
 47 #include <linux/rtmutex.h>
 48 
 49 #include <linux/time.h>
 50 #include <linux/param.h>
 51 #include <linux/resource.h>
 52 #include <linux/timer.h>
 53 #include <linux/hrtimer.h>
 54 #include <linux/task_io_accounting.h>
 55 #include <linux/latencytop.h>
 56 #include <linux/cred.h>
 57 #include <linux/llist.h>
 58 #include <linux/uidgid.h>
 59 #include <linux/gfp.h>
 60 #include <linux/magic.h>
 61 
 62 #include <asm/processor.h>
 63 
 64 #define SCHED_ATTR_SIZE_VER0    48      /* sizeof first published struct */
 65 
 66 /*
 67  * Extended scheduling parameters data structure.
 68  *
 69  * This is needed because the original struct sched_param can not be
 70  * altered without introducing ABI issues with legacy applications
 71  * (e.g., in sched_getparam()).
 72  *
 73  * However, the possibility of specifying more than just a priority for
 74  * the tasks may be useful for a wide variety of application fields, e.g.,
 75  * multimedia, streaming, automation and control, and many others.
 76  *
 77  * This variant (sched_attr) is meant at describing a so-called
 78  * sporadic time-constrained task. In such model a task is specified by:
 79  *  - the activation period or minimum instance inter-arrival time;
 80  *  - the maximum (or average, depending on the actual scheduling
 81  *    discipline) computation time of all instances, a.k.a. runtime;
 82  *  - the deadline (relative to the actual activation time) of each
 83  *    instance.
 84  * Very briefly, a periodic (sporadic) task asks for the execution of
 85  * some specific computation --which is typically called an instance--
 86  * (at most) every period. Moreover, each instance typically lasts no more
 87  * than the runtime and must be completed by time instant t equal to
 88  * the instance activation time + the deadline.
 89  *
 90  * This is reflected by the actual fields of the sched_attr structure:
 91  *
 92  *  @size               size of the structure, for fwd/bwd compat.
 93  *
 94  *  @sched_policy       task's scheduling policy
 95  *  @sched_flags        for customizing the scheduler behaviour
 96  *  @sched_nice         task's nice value      (SCHED_NORMAL/BATCH)
 97  *  @sched_priority     task's static priority (SCHED_FIFO/RR)
 98  *  @sched_deadline     representative of the task's deadline
 99  *  @sched_runtime      representative of the task's runtime
100  *  @sched_period       representative of the task's period
101  *
102  * Given this task model, there are a multiplicity of scheduling algorithms
103  * and policies, that can be used to ensure all the tasks will make their
104  * timing constraints.
105  *
106  * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
107  * only user of this new interface. More information about the algorithm
108  * available in the scheduling class file or in Documentation/.
109  */
110 struct sched_attr {
111         u32 size;
112 
113         u32 sched_policy;
114         u64 sched_flags;
115 
116         /* SCHED_NORMAL, SCHED_BATCH */
117         s32 sched_nice;
118 
119         /* SCHED_FIFO, SCHED_RR */
120         u32 sched_priority;
121 
122         /* SCHED_DEADLINE */
123         u64 sched_runtime;
124         u64 sched_deadline;
125         u64 sched_period;
126 };
127 
128 struct futex_pi_state;
129 struct robust_list_head;
130 struct bio_list;
131 struct fs_struct;
132 struct perf_event_context;
133 struct blk_plug;
134 struct filename;
135 
136 #define VMACACHE_BITS 2
137 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
138 #define VMACACHE_MASK (VMACACHE_SIZE - 1)
139 
140 /*
141  * These are the constant used to fake the fixed-point load-average
142  * counting. Some notes:
143  *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
144  *    a load-average precision of 10 bits integer + 11 bits fractional
145  *  - if you want to count load-averages more often, you need more
146  *    precision, or rounding will get you. With 2-second counting freq,
147  *    the EXP_n values would be 1981, 2034 and 2043 if still using only
148  *    11 bit fractions.
149  */
150 extern unsigned long avenrun[];         /* Load averages */
151 extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
152 
153 #define FSHIFT          11              /* nr of bits of precision */
154 #define FIXED_1         (1<<FSHIFT)     /* 1.0 as fixed-point */
155 #define LOAD_FREQ       (5*HZ+1)        /* 5 sec intervals */
156 #define EXP_1           1884            /* 1/exp(5sec/1min) as fixed-point */
157 #define EXP_5           2014            /* 1/exp(5sec/5min) */
158 #define EXP_15          2037            /* 1/exp(5sec/15min) */
159 
160 #define CALC_LOAD(load,exp,n) \
161         load *= exp; \
162         load += n*(FIXED_1-exp); \
163         load >>= FSHIFT;
164 
165 extern unsigned long total_forks;
166 extern int nr_threads;
167 DECLARE_PER_CPU(unsigned long, process_counts);
168 extern int nr_processes(void);
169 extern unsigned long nr_running(void);
170 extern bool single_task_running(void);
171 extern unsigned long nr_iowait(void);
172 extern unsigned long nr_iowait_cpu(int cpu);
173 extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
174 
175 extern void calc_global_load(unsigned long ticks);
176 extern void update_cpu_load_nohz(void);
177 
178 extern unsigned long get_parent_ip(unsigned long addr);
179 
180 extern void dump_cpu_task(int cpu);
181 
182 struct seq_file;
183 struct cfs_rq;
184 struct task_group;
185 #ifdef CONFIG_SCHED_DEBUG
186 extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
187 extern void proc_sched_set_task(struct task_struct *p);
188 extern void
189 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
190 #endif
191 
192 /*
193  * Task state bitmask. NOTE! These bits are also
194  * encoded in fs/proc/array.c: get_task_state().
195  *
196  * We have two separate sets of flags: task->state
197  * is about runnability, while task->exit_state are
198  * about the task exiting. Confusing, but this way
199  * modifying one set can't modify the other one by
200  * mistake.
201  */
202 #define TASK_RUNNING            0
203 #define TASK_INTERRUPTIBLE      1
204 #define TASK_UNINTERRUPTIBLE    2
205 #define __TASK_STOPPED          4
206 #define __TASK_TRACED           8
207 /* in tsk->exit_state */
208 #define EXIT_DEAD               16
209 #define EXIT_ZOMBIE             32
210 #define EXIT_TRACE              (EXIT_ZOMBIE | EXIT_DEAD)
211 /* in tsk->state again */
212 #define TASK_DEAD               64
213 #define TASK_WAKEKILL           128
214 #define TASK_WAKING             256
215 #define TASK_PARKED             512
216 #define TASK_STATE_MAX          1024
217 
218 #define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
219 
220 extern char ___assert_task_state[1 - 2*!!(
221                 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
222 
223 /* Convenience macros for the sake of set_task_state */
224 #define TASK_KILLABLE           (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
225 #define TASK_STOPPED            (TASK_WAKEKILL | __TASK_STOPPED)
226 #define TASK_TRACED             (TASK_WAKEKILL | __TASK_TRACED)
227 
228 /* Convenience macros for the sake of wake_up */
229 #define TASK_NORMAL             (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
230 #define TASK_ALL                (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
231 
232 /* get_task_state() */
233 #define TASK_REPORT             (TASK_RUNNING | TASK_INTERRUPTIBLE | \
234                                  TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
235                                  __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
236 
237 #define task_is_traced(task)    ((task->state & __TASK_TRACED) != 0)
238 #define task_is_stopped(task)   ((task->state & __TASK_STOPPED) != 0)
239 #define task_is_stopped_or_traced(task) \
240                         ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
241 #define task_contributes_to_load(task)  \
242                                 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
243                                  (task->flags & PF_FROZEN) == 0)
244 
245 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
246 
247 #define __set_task_state(tsk, state_value)                      \
248         do {                                                    \
249                 (tsk)->task_state_change = _THIS_IP_;           \
250                 (tsk)->state = (state_value);                   \
251         } while (0)
252 #define set_task_state(tsk, state_value)                        \
253         do {                                                    \
254                 (tsk)->task_state_change = _THIS_IP_;           \
255                 set_mb((tsk)->state, (state_value));            \
256         } while (0)
257 
258 /*
259  * set_current_state() includes a barrier so that the write of current->state
260  * is correctly serialised wrt the caller's subsequent test of whether to
261  * actually sleep:
262  *
263  *      set_current_state(TASK_UNINTERRUPTIBLE);
264  *      if (do_i_need_to_sleep())
265  *              schedule();
266  *
267  * If the caller does not need such serialisation then use __set_current_state()
268  */
269 #define __set_current_state(state_value)                        \
270         do {                                                    \
271                 current->task_state_change = _THIS_IP_;         \
272                 current->state = (state_value);                 \
273         } while (0)
274 #define set_current_state(state_value)                          \
275         do {                                                    \
276                 current->task_state_change = _THIS_IP_;         \
277                 set_mb(current->state, (state_value));          \
278         } while (0)
279 
280 #else
281 
282 #define __set_task_state(tsk, state_value)              \
283         do { (tsk)->state = (state_value); } while (0)
284 #define set_task_state(tsk, state_value)                \
285         set_mb((tsk)->state, (state_value))
286 
287 /*
288  * set_current_state() includes a barrier so that the write of current->state
289  * is correctly serialised wrt the caller's subsequent test of whether to
290  * actually sleep:
291  *
292  *      set_current_state(TASK_UNINTERRUPTIBLE);
293  *      if (do_i_need_to_sleep())
294  *              schedule();
295  *
296  * If the caller does not need such serialisation then use __set_current_state()
297  */
298 #define __set_current_state(state_value)                \
299         do { current->state = (state_value); } while (0)
300 #define set_current_state(state_value)                  \
301         set_mb(current->state, (state_value))
302 
303 #endif
304 
305 /* Task command name length */
306 #define TASK_COMM_LEN 16
307 
308 #include <linux/spinlock.h>
309 
310 /*
311  * This serializes "schedule()" and also protects
312  * the run-queue from deletions/modifications (but
313  * _adding_ to the beginning of the run-queue has
314  * a separate lock).
315  */
316 extern rwlock_t tasklist_lock;
317 extern spinlock_t mmlist_lock;
318 
319 struct task_struct;
320 
321 #ifdef CONFIG_PROVE_RCU
322 extern int lockdep_tasklist_lock_is_held(void);
323 #endif /* #ifdef CONFIG_PROVE_RCU */
324 
325 extern void sched_init(void);
326 extern void sched_init_smp(void);
327 extern asmlinkage void schedule_tail(struct task_struct *prev);
328 extern void init_idle(struct task_struct *idle, int cpu);
329 extern void init_idle_bootup_task(struct task_struct *idle);
330 
331 extern cpumask_var_t cpu_isolated_map;
332 
333 extern int runqueue_is_locked(int cpu);
334 
335 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
336 extern void nohz_balance_enter_idle(int cpu);
337 extern void set_cpu_sd_state_idle(void);
338 extern int get_nohz_timer_target(int pinned);
339 #else
340 static inline void nohz_balance_enter_idle(int cpu) { }
341 static inline void set_cpu_sd_state_idle(void) { }
342 static inline int get_nohz_timer_target(int pinned)
343 {
344         return smp_processor_id();
345 }
346 #endif
347 
348 /*
349  * Only dump TASK_* tasks. (0 for all tasks)
350  */
351 extern void show_state_filter(unsigned long state_filter);
352 
353 static inline void show_state(void)
354 {
355         show_state_filter(0);
356 }
357 
358 extern void show_regs(struct pt_regs *);
359 
360 /*
361  * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
362  * task), SP is the stack pointer of the first frame that should be shown in the back
363  * trace (or NULL if the entire call-chain of the task should be shown).
364  */
365 extern void show_stack(struct task_struct *task, unsigned long *sp);
366 
367 extern void cpu_init (void);
368 extern void trap_init(void);
369 extern void update_process_times(int user);
370 extern void scheduler_tick(void);
371 
372 extern void sched_show_task(struct task_struct *p);
373 
374 #ifdef CONFIG_LOCKUP_DETECTOR
375 extern void touch_softlockup_watchdog(void);
376 extern void touch_softlockup_watchdog_sync(void);
377 extern void touch_all_softlockup_watchdogs(void);
378 extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
379                                   void __user *buffer,
380                                   size_t *lenp, loff_t *ppos);
381 extern unsigned int  softlockup_panic;
382 void lockup_detector_init(void);
383 #else
384 static inline void touch_softlockup_watchdog(void)
385 {
386 }
387 static inline void touch_softlockup_watchdog_sync(void)
388 {
389 }
390 static inline void touch_all_softlockup_watchdogs(void)
391 {
392 }
393 static inline void lockup_detector_init(void)
394 {
395 }
396 #endif
397 
398 #ifdef CONFIG_DETECT_HUNG_TASK
399 void reset_hung_task_detector(void);
400 #else
401 static inline void reset_hung_task_detector(void)
402 {
403 }
404 #endif
405 
406 /* Attach to any functions which should be ignored in wchan output. */
407 #define __sched         __attribute__((__section__(".sched.text")))
408 
409 /* Linker adds these: start and end of __sched functions */
410 extern char __sched_text_start[], __sched_text_end[];
411 
412 /* Is this address in the __sched functions? */
413 extern int in_sched_functions(unsigned long addr);
414 
415 #define MAX_SCHEDULE_TIMEOUT    LONG_MAX
416 extern signed long schedule_timeout(signed long timeout);
417 extern signed long schedule_timeout_interruptible(signed long timeout);
418 extern signed long schedule_timeout_killable(signed long timeout);
419 extern signed long schedule_timeout_uninterruptible(signed long timeout);
420 asmlinkage void schedule(void);
421 extern void schedule_preempt_disabled(void);
422 
423 extern long io_schedule_timeout(long timeout);
424 
425 static inline void io_schedule(void)
426 {
427         io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
428 }
429 
430 struct nsproxy;
431 struct user_namespace;
432 
433 #ifdef CONFIG_MMU
434 extern void arch_pick_mmap_layout(struct mm_struct *mm);
435 extern unsigned long
436 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
437                        unsigned long, unsigned long);
438 extern unsigned long
439 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
440                           unsigned long len, unsigned long pgoff,
441                           unsigned long flags);
442 #else
443 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
444 #endif
445 
446 #define SUID_DUMP_DISABLE       0       /* No setuid dumping */
447 #define SUID_DUMP_USER          1       /* Dump as user of process */
448 #define SUID_DUMP_ROOT          2       /* Dump as root */
449 
450 /* mm flags */
451 
452 /* for SUID_DUMP_* above */
453 #define MMF_DUMPABLE_BITS 2
454 #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
455 
456 extern void set_dumpable(struct mm_struct *mm, int value);
457 /*
458  * This returns the actual value of the suid_dumpable flag. For things
459  * that are using this for checking for privilege transitions, it must
460  * test against SUID_DUMP_USER rather than treating it as a boolean
461  * value.
462  */
463 static inline int __get_dumpable(unsigned long mm_flags)
464 {
465         return mm_flags & MMF_DUMPABLE_MASK;
466 }
467 
468 static inline int get_dumpable(struct mm_struct *mm)
469 {
470         return __get_dumpable(mm->flags);
471 }
472 
473 /* coredump filter bits */
474 #define MMF_DUMP_ANON_PRIVATE   2
475 #define MMF_DUMP_ANON_SHARED    3
476 #define MMF_DUMP_MAPPED_PRIVATE 4
477 #define MMF_DUMP_MAPPED_SHARED  5
478 #define MMF_DUMP_ELF_HEADERS    6
479 #define MMF_DUMP_HUGETLB_PRIVATE 7
480 #define MMF_DUMP_HUGETLB_SHARED  8
481 
482 #define MMF_DUMP_FILTER_SHIFT   MMF_DUMPABLE_BITS
483 #define MMF_DUMP_FILTER_BITS    7
484 #define MMF_DUMP_FILTER_MASK \
485         (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
486 #define MMF_DUMP_FILTER_DEFAULT \
487         ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
488          (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
489 
490 #ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
491 # define MMF_DUMP_MASK_DEFAULT_ELF      (1 << MMF_DUMP_ELF_HEADERS)
492 #else
493 # define MMF_DUMP_MASK_DEFAULT_ELF      0
494 #endif
495                                         /* leave room for more dump flags */
496 #define MMF_VM_MERGEABLE        16      /* KSM may merge identical pages */
497 #define MMF_VM_HUGEPAGE         17      /* set when VM_HUGEPAGE is set on vma */
498 #define MMF_EXE_FILE_CHANGED    18      /* see prctl_set_mm_exe_file() */
499 
500 #define MMF_HAS_UPROBES         19      /* has uprobes */
501 #define MMF_RECALC_UPROBES      20      /* MMF_HAS_UPROBES can be wrong */
502 
503 #define MMF_INIT_MASK           (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
504 
505 struct sighand_struct {
506         atomic_t                count;
507         struct k_sigaction      action[_NSIG];
508         spinlock_t              siglock;
509         wait_queue_head_t       signalfd_wqh;
510 };
511 
512 struct pacct_struct {
513         int                     ac_flag;
514         long                    ac_exitcode;
515         unsigned long           ac_mem;
516         cputime_t               ac_utime, ac_stime;
517         unsigned long           ac_minflt, ac_majflt;
518 };
519 
520 struct cpu_itimer {
521         cputime_t expires;
522         cputime_t incr;
523         u32 error;
524         u32 incr_error;
525 };
526 
527 /**
528  * struct cputime - snaphsot of system and user cputime
529  * @utime: time spent in user mode
530  * @stime: time spent in system mode
531  *
532  * Gathers a generic snapshot of user and system time.
533  */
534 struct cputime {
535         cputime_t utime;
536         cputime_t stime;
537 };
538 
539 /**
540  * struct task_cputime - collected CPU time counts
541  * @utime:              time spent in user mode, in &cputime_t units
542  * @stime:              time spent in kernel mode, in &cputime_t units
543  * @sum_exec_runtime:   total time spent on the CPU, in nanoseconds
544  *
545  * This is an extension of struct cputime that includes the total runtime
546  * spent by the task from the scheduler point of view.
547  *
548  * As a result, this structure groups together three kinds of CPU time
549  * that are tracked for threads and thread groups.  Most things considering
550  * CPU time want to group these counts together and treat all three
551  * of them in parallel.
552  */
553 struct task_cputime {
554         cputime_t utime;
555         cputime_t stime;
556         unsigned long long sum_exec_runtime;
557 };
558 /* Alternate field names when used to cache expirations. */
559 #define prof_exp        stime
560 #define virt_exp        utime
561 #define sched_exp       sum_exec_runtime
562 
563 #define INIT_CPUTIME    \
564         (struct task_cputime) {                                 \
565                 .utime = 0,                                     \
566                 .stime = 0,                                     \
567                 .sum_exec_runtime = 0,                          \
568         }
569 
570 #ifdef CONFIG_PREEMPT_COUNT
571 #define PREEMPT_DISABLED        (1 + PREEMPT_ENABLED)
572 #else
573 #define PREEMPT_DISABLED        PREEMPT_ENABLED
574 #endif
575 
576 /*
577  * Disable preemption until the scheduler is running.
578  * Reset by start_kernel()->sched_init()->init_idle().
579  *
580  * We include PREEMPT_ACTIVE to avoid cond_resched() from working
581  * before the scheduler is active -- see should_resched().
582  */
583 #define INIT_PREEMPT_COUNT      (PREEMPT_DISABLED + PREEMPT_ACTIVE)
584 
585 /**
586  * struct thread_group_cputimer - thread group interval timer counts
587  * @cputime:            thread group interval timers.
588  * @running:            non-zero when there are timers running and
589  *                      @cputime receives updates.
590  * @lock:               lock for fields in this struct.
591  *
592  * This structure contains the version of task_cputime, above, that is
593  * used for thread group CPU timer calculations.
594  */
595 struct thread_group_cputimer {
596         struct task_cputime cputime;
597         int running;
598         raw_spinlock_t lock;
599 };
600 
601 #include <linux/rwsem.h>
602 struct autogroup;
603 
604 /*
605  * NOTE! "signal_struct" does not have its own
606  * locking, because a shared signal_struct always
607  * implies a shared sighand_struct, so locking
608  * sighand_struct is always a proper superset of
609  * the locking of signal_struct.
610  */
611 struct signal_struct {
612         atomic_t                sigcnt;
613         atomic_t                live;
614         int                     nr_threads;
615         struct list_head        thread_head;
616 
617         wait_queue_head_t       wait_chldexit;  /* for wait4() */
618 
619         /* current thread group signal load-balancing target: */
620         struct task_struct      *curr_target;
621 
622         /* shared signal handling: */
623         struct sigpending       shared_pending;
624 
625         /* thread group exit support */
626         int                     group_exit_code;
627         /* overloaded:
628          * - notify group_exit_task when ->count is equal to notify_count
629          * - everyone except group_exit_task is stopped during signal delivery
630          *   of fatal signals, group_exit_task processes the signal.
631          */
632         int                     notify_count;
633         struct task_struct      *group_exit_task;
634 
635         /* thread group stop support, overloads group_exit_code too */
636         int                     group_stop_count;
637         unsigned int            flags; /* see SIGNAL_* flags below */
638 
639         /*
640          * PR_SET_CHILD_SUBREAPER marks a process, like a service
641          * manager, to re-parent orphan (double-forking) child processes
642          * to this process instead of 'init'. The service manager is
643          * able to receive SIGCHLD signals and is able to investigate
644          * the process until it calls wait(). All children of this
645          * process will inherit a flag if they should look for a
646          * child_subreaper process at exit.
647          */
648         unsigned int            is_child_subreaper:1;
649         unsigned int            has_child_subreaper:1;
650 
651         /* POSIX.1b Interval Timers */
652         int                     posix_timer_id;
653         struct list_head        posix_timers;
654 
655         /* ITIMER_REAL timer for the process */
656         struct hrtimer real_timer;
657         struct pid *leader_pid;
658         ktime_t it_real_incr;
659 
660         /*
661          * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
662          * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
663          * values are defined to 0 and 1 respectively
664          */
665         struct cpu_itimer it[2];
666 
667         /*
668          * Thread group totals for process CPU timers.
669          * See thread_group_cputimer(), et al, for details.
670          */
671         struct thread_group_cputimer cputimer;
672 
673         /* Earliest-expiration cache. */
674         struct task_cputime cputime_expires;
675 
676         struct list_head cpu_timers[3];
677 
678         struct pid *tty_old_pgrp;
679 
680         /* boolean value for session group leader */
681         int leader;
682 
683         struct tty_struct *tty; /* NULL if no tty */
684 
685 #ifdef CONFIG_SCHED_AUTOGROUP
686         struct autogroup *autogroup;
687 #endif
688         /*
689          * Cumulative resource counters for dead threads in the group,
690          * and for reaped dead child processes forked by this group.
691          * Live threads maintain their own counters and add to these
692          * in __exit_signal, except for the group leader.
693          */
694         seqlock_t stats_lock;
695         cputime_t utime, stime, cutime, cstime;
696         cputime_t gtime;
697         cputime_t cgtime;
698 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
699         struct cputime prev_cputime;
700 #endif
701         unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
702         unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
703         unsigned long inblock, oublock, cinblock, coublock;
704         unsigned long maxrss, cmaxrss;
705         struct task_io_accounting ioac;
706 
707         /*
708          * Cumulative ns of schedule CPU time fo dead threads in the
709          * group, not including a zombie group leader, (This only differs
710          * from jiffies_to_ns(utime + stime) if sched_clock uses something
711          * other than jiffies.)
712          */
713         unsigned long long sum_sched_runtime;
714 
715         /*
716          * We don't bother to synchronize most readers of this at all,
717          * because there is no reader checking a limit that actually needs
718          * to get both rlim_cur and rlim_max atomically, and either one
719          * alone is a single word that can safely be read normally.
720          * getrlimit/setrlimit use task_lock(current->group_leader) to
721          * protect this instead of the siglock, because they really
722          * have no need to disable irqs.
723          */
724         struct rlimit rlim[RLIM_NLIMITS];
725 
726 #ifdef CONFIG_BSD_PROCESS_ACCT
727         struct pacct_struct pacct;      /* per-process accounting information */
728 #endif
729 #ifdef CONFIG_TASKSTATS
730         struct taskstats *stats;
731 #endif
732 #ifdef CONFIG_AUDIT
733         unsigned audit_tty;
734         unsigned audit_tty_log_passwd;
735         struct tty_audit_buf *tty_audit_buf;
736 #endif
737 #ifdef CONFIG_CGROUPS
738         /*
739          * group_rwsem prevents new tasks from entering the threadgroup and
740          * member tasks from exiting,a more specifically, setting of
741          * PF_EXITING.  fork and exit paths are protected with this rwsem
742          * using threadgroup_change_begin/end().  Users which require
743          * threadgroup to remain stable should use threadgroup_[un]lock()
744          * which also takes care of exec path.  Currently, cgroup is the
745          * only user.
746          */
747         struct rw_semaphore group_rwsem;
748 #endif
749 
750         oom_flags_t oom_flags;
751         short oom_score_adj;            /* OOM kill score adjustment */
752         short oom_score_adj_min;        /* OOM kill score adjustment min value.
753                                          * Only settable by CAP_SYS_RESOURCE. */
754 
755         struct mutex cred_guard_mutex;  /* guard against foreign influences on
756                                          * credential calculations
757                                          * (notably. ptrace) */
758 };
759 
760 /*
761  * Bits in flags field of signal_struct.
762  */
763 #define SIGNAL_STOP_STOPPED     0x00000001 /* job control stop in effect */
764 #define SIGNAL_STOP_CONTINUED   0x00000002 /* SIGCONT since WCONTINUED reap */
765 #define SIGNAL_GROUP_EXIT       0x00000004 /* group exit in progress */
766 #define SIGNAL_GROUP_COREDUMP   0x00000008 /* coredump in progress */
767 /*
768  * Pending notifications to parent.
769  */
770 #define SIGNAL_CLD_STOPPED      0x00000010
771 #define SIGNAL_CLD_CONTINUED    0x00000020
772 #define SIGNAL_CLD_MASK         (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
773 
774 #define SIGNAL_UNKILLABLE       0x00000040 /* for init: ignore fatal signals */
775 
776 /* If true, all threads except ->group_exit_task have pending SIGKILL */
777 static inline int signal_group_exit(const struct signal_struct *sig)
778 {
779         return  (sig->flags & SIGNAL_GROUP_EXIT) ||
780                 (sig->group_exit_task != NULL);
781 }
782 
783 /*
784  * Some day this will be a full-fledged user tracking system..
785  */
786 struct user_struct {
787         atomic_t __count;       /* reference count */
788         atomic_t processes;     /* How many processes does this user have? */
789         atomic_t sigpending;    /* How many pending signals does this user have? */
790 #ifdef CONFIG_INOTIFY_USER
791         atomic_t inotify_watches; /* How many inotify watches does this user have? */
792         atomic_t inotify_devs;  /* How many inotify devs does this user have opened? */
793 #endif
794 #ifdef CONFIG_FANOTIFY
795         atomic_t fanotify_listeners;
796 #endif
797 #ifdef CONFIG_EPOLL
798         atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
799 #endif
800 #ifdef CONFIG_POSIX_MQUEUE
801         /* protected by mq_lock */
802         unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
803 #endif
804         unsigned long locked_shm; /* How many pages of mlocked shm ? */
805 
806 #ifdef CONFIG_KEYS
807         struct key *uid_keyring;        /* UID specific keyring */
808         struct key *session_keyring;    /* UID's default session keyring */
809 #endif
810 
811         /* Hash table maintenance information */
812         struct hlist_node uidhash_node;
813         kuid_t uid;
814 
815 #ifdef CONFIG_PERF_EVENTS
816         atomic_long_t locked_vm;
817 #endif
818 };
819 
820 extern int uids_sysfs_init(void);
821 
822 extern struct user_struct *find_user(kuid_t);
823 
824 extern struct user_struct root_user;
825 #define INIT_USER (&root_user)
826 
827 
828 struct backing_dev_info;
829 struct reclaim_state;
830 
831 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
832 struct sched_info {
833         /* cumulative counters */
834         unsigned long pcount;         /* # of times run on this cpu */
835         unsigned long long run_delay; /* time spent waiting on a runqueue */
836 
837         /* timestamps */
838         unsigned long long last_arrival,/* when we last ran on a cpu */
839                            last_queued; /* when we were last queued to run */
840 };
841 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
842 
843 #ifdef CONFIG_TASK_DELAY_ACCT
844 struct task_delay_info {
845         spinlock_t      lock;
846         unsigned int    flags;  /* Private per-task flags */
847 
848         /* For each stat XXX, add following, aligned appropriately
849          *
850          * struct timespec XXX_start, XXX_end;
851          * u64 XXX_delay;
852          * u32 XXX_count;
853          *
854          * Atomicity of updates to XXX_delay, XXX_count protected by
855          * single lock above (split into XXX_lock if contention is an issue).
856          */
857 
858         /*
859          * XXX_count is incremented on every XXX operation, the delay
860          * associated with the operation is added to XXX_delay.
861          * XXX_delay contains the accumulated delay time in nanoseconds.
862          */
863         u64 blkio_start;        /* Shared by blkio, swapin */
864         u64 blkio_delay;        /* wait for sync block io completion */
865         u64 swapin_delay;       /* wait for swapin block io completion */
866         u32 blkio_count;        /* total count of the number of sync block */
867                                 /* io operations performed */
868         u32 swapin_count;       /* total count of the number of swapin block */
869                                 /* io operations performed */
870 
871         u64 freepages_start;
872         u64 freepages_delay;    /* wait for memory reclaim */
873         u32 freepages_count;    /* total count of memory reclaim */
874 };
875 #endif  /* CONFIG_TASK_DELAY_ACCT */
876 
877 static inline int sched_info_on(void)
878 {
879 #ifdef CONFIG_SCHEDSTATS
880         return 1;
881 #elif defined(CONFIG_TASK_DELAY_ACCT)
882         extern int delayacct_on;
883         return delayacct_on;
884 #else
885         return 0;
886 #endif
887 }
888 
889 enum cpu_idle_type {
890         CPU_IDLE,
891         CPU_NOT_IDLE,
892         CPU_NEWLY_IDLE,
893         CPU_MAX_IDLE_TYPES
894 };
895 
896 /*
897  * Increase resolution of cpu_capacity calculations
898  */
899 #define SCHED_CAPACITY_SHIFT    10
900 #define SCHED_CAPACITY_SCALE    (1L << SCHED_CAPACITY_SHIFT)
901 
902 /*
903  * sched-domains (multiprocessor balancing) declarations:
904  */
905 #ifdef CONFIG_SMP
906 #define SD_LOAD_BALANCE         0x0001  /* Do load balancing on this domain. */
907 #define SD_BALANCE_NEWIDLE      0x0002  /* Balance when about to become idle */
908 #define SD_BALANCE_EXEC         0x0004  /* Balance on exec */
909 #define SD_BALANCE_FORK         0x0008  /* Balance on fork, clone */
910 #define SD_BALANCE_WAKE         0x0010  /* Balance on wakeup */
911 #define SD_WAKE_AFFINE          0x0020  /* Wake task to waking CPU */
912 #define SD_SHARE_CPUCAPACITY    0x0080  /* Domain members share cpu power */
913 #define SD_SHARE_POWERDOMAIN    0x0100  /* Domain members share power domain */
914 #define SD_SHARE_PKG_RESOURCES  0x0200  /* Domain members share cpu pkg resources */
915 #define SD_SERIALIZE            0x0400  /* Only a single load balancing instance */
916 #define SD_ASYM_PACKING         0x0800  /* Place busy groups earlier in the domain */
917 #define SD_PREFER_SIBLING       0x1000  /* Prefer to place tasks in a sibling domain */
918 #define SD_OVERLAP              0x2000  /* sched_domains of this level overlap */
919 #define SD_NUMA                 0x4000  /* cross-node balancing */
920 
921 #ifdef CONFIG_SCHED_SMT
922 static inline int cpu_smt_flags(void)
923 {
924         return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
925 }
926 #endif
927 
928 #ifdef CONFIG_SCHED_MC
929 static inline int cpu_core_flags(void)
930 {
931         return SD_SHARE_PKG_RESOURCES;
932 }
933 #endif
934 
935 #ifdef CONFIG_NUMA
936 static inline int cpu_numa_flags(void)
937 {
938         return SD_NUMA;
939 }
940 #endif
941 
942 struct sched_domain_attr {
943         int relax_domain_level;
944 };
945 
946 #define SD_ATTR_INIT    (struct sched_domain_attr) {    \
947         .relax_domain_level = -1,                       \
948 }
949 
950 extern int sched_domain_level_max;
951 
952 struct sched_group;
953 
954 struct sched_domain {
955         /* These fields must be setup */
956         struct sched_domain *parent;    /* top domain must be null terminated */
957         struct sched_domain *child;     /* bottom domain must be null terminated */
958         struct sched_group *groups;     /* the balancing groups of the domain */
959         unsigned long min_interval;     /* Minimum balance interval ms */
960         unsigned long max_interval;     /* Maximum balance interval ms */
961         unsigned int busy_factor;       /* less balancing by factor if busy */
962         unsigned int imbalance_pct;     /* No balance until over watermark */
963         unsigned int cache_nice_tries;  /* Leave cache hot tasks for # tries */
964         unsigned int busy_idx;
965         unsigned int idle_idx;
966         unsigned int newidle_idx;
967         unsigned int wake_idx;
968         unsigned int forkexec_idx;
969         unsigned int smt_gain;
970 
971         int nohz_idle;                  /* NOHZ IDLE status */
972         int flags;                      /* See SD_* */
973         int level;
974 
975         /* Runtime fields. */
976         unsigned long last_balance;     /* init to jiffies. units in jiffies */
977         unsigned int balance_interval;  /* initialise to 1. units in ms. */
978         unsigned int nr_balance_failed; /* initialise to 0 */
979 
980         /* idle_balance() stats */
981         u64 max_newidle_lb_cost;
982         unsigned long next_decay_max_lb_cost;
983 
984 #ifdef CONFIG_SCHEDSTATS
985         /* load_balance() stats */
986         unsigned int lb_count[CPU_MAX_IDLE_TYPES];
987         unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
988         unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
989         unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
990         unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
991         unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
992         unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
993         unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
994 
995         /* Active load balancing */
996         unsigned int alb_count;
997         unsigned int alb_failed;
998         unsigned int alb_pushed;
999 
1000         /* SD_BALANCE_EXEC stats */
1001         unsigned int sbe_count;
1002         unsigned int sbe_balanced;
1003         unsigned int sbe_pushed;
1004 
1005         /* SD_BALANCE_FORK stats */
1006         unsigned int sbf_count;
1007         unsigned int sbf_balanced;
1008         unsigned int sbf_pushed;
1009 
1010         /* try_to_wake_up() stats */
1011         unsigned int ttwu_wake_remote;
1012         unsigned int ttwu_move_affine;
1013         unsigned int ttwu_move_balance;
1014 #endif
1015 #ifdef CONFIG_SCHED_DEBUG
1016         char *name;
1017 #endif
1018         union {
1019                 void *private;          /* used during construction */
1020                 struct rcu_head rcu;    /* used during destruction */
1021         };
1022 
1023         unsigned int span_weight;
1024         /*
1025          * Span of all CPUs in this domain.
1026          *
1027          * NOTE: this field is variable length. (Allocated dynamically
1028          * by attaching extra space to the end of the structure,
1029          * depending on how many CPUs the kernel has booted up with)
1030          */
1031         unsigned long span[0];
1032 };
1033 
1034 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1035 {
1036         return to_cpumask(sd->span);
1037 }
1038 
1039 extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1040                                     struct sched_domain_attr *dattr_new);
1041 
1042 /* Allocate an array of sched domains, for partition_sched_domains(). */
1043 cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1044 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1045 
1046 bool cpus_share_cache(int this_cpu, int that_cpu);
1047 
1048 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1049 typedef int (*sched_domain_flags_f)(void);
1050 
1051 #define SDTL_OVERLAP    0x01
1052 
1053 struct sd_data {
1054         struct sched_domain **__percpu sd;
1055         struct sched_group **__percpu sg;
1056         struct sched_group_capacity **__percpu sgc;
1057 };
1058 
1059 struct sched_domain_topology_level {
1060         sched_domain_mask_f mask;
1061         sched_domain_flags_f sd_flags;
1062         int                 flags;
1063         int                 numa_level;
1064         struct sd_data      data;
1065 #ifdef CONFIG_SCHED_DEBUG
1066         char                *name;
1067 #endif
1068 };
1069 
1070 extern struct sched_domain_topology_level *sched_domain_topology;
1071 
1072 extern void set_sched_topology(struct sched_domain_topology_level *tl);
1073 extern void wake_up_if_idle(int cpu);
1074 
1075 #ifdef CONFIG_SCHED_DEBUG
1076 # define SD_INIT_NAME(type)             .name = #type
1077 #else
1078 # define SD_INIT_NAME(type)
1079 #endif
1080 
1081 #else /* CONFIG_SMP */
1082 
1083 struct sched_domain_attr;
1084 
1085 static inline void
1086 partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1087                         struct sched_domain_attr *dattr_new)
1088 {
1089 }
1090 
1091 static inline bool cpus_share_cache(int this_cpu, int that_cpu)
1092 {
1093         return true;
1094 }
1095 
1096 #endif  /* !CONFIG_SMP */
1097 
1098 
1099 struct io_context;                      /* See blkdev.h */
1100 
1101 
1102 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1103 extern void prefetch_stack(struct task_struct *t);
1104 #else
1105 static inline void prefetch_stack(struct task_struct *t) { }
1106 #endif
1107 
1108 struct audit_context;           /* See audit.c */
1109 struct mempolicy;
1110 struct pipe_inode_info;
1111 struct uts_namespace;
1112 
1113 struct load_weight {
1114         unsigned long weight;
1115         u32 inv_weight;
1116 };
1117 
1118 struct sched_avg {
1119         u64 last_runnable_update;
1120         s64 decay_count;
1121         /*
1122          * utilization_avg_contrib describes the amount of time that a
1123          * sched_entity is running on a CPU. It is based on running_avg_sum
1124          * and is scaled in the range [0..SCHED_LOAD_SCALE].
1125          * load_avg_contrib described the amount of time that a sched_entity
1126          * is runnable on a rq. It is based on both runnable_avg_sum and the
1127          * weight of the task.
1128          */
1129         unsigned long load_avg_contrib, utilization_avg_contrib;
1130         /*
1131          * These sums represent an infinite geometric series and so are bound
1132          * above by 1024/(1-y).  Thus we only need a u32 to store them for all
1133          * choices of y < 1-2^(-32)*1024.
1134          * running_avg_sum reflects the time that the sched_entity is
1135          * effectively running on the CPU.
1136          * runnable_avg_sum represents the amount of time a sched_entity is on
1137          * a runqueue which includes the running time that is monitored by
1138          * running_avg_sum.
1139          */
1140         u32 runnable_avg_sum, avg_period, running_avg_sum;
1141 };
1142 
1143 #ifdef CONFIG_SCHEDSTATS
1144 struct sched_statistics {
1145         u64                     wait_start;
1146         u64                     wait_max;
1147         u64                     wait_count;
1148         u64                     wait_sum;
1149         u64                     iowait_count;
1150         u64                     iowait_sum;
1151 
1152         u64                     sleep_start;
1153         u64                     sleep_max;
1154         s64                     sum_sleep_runtime;
1155 
1156         u64                     block_start;
1157         u64                     block_max;
1158         u64                     exec_max;
1159         u64                     slice_max;
1160 
1161         u64                     nr_migrations_cold;
1162         u64                     nr_failed_migrations_affine;
1163         u64                     nr_failed_migrations_running;
1164         u64                     nr_failed_migrations_hot;
1165         u64                     nr_forced_migrations;
1166 
1167         u64                     nr_wakeups;
1168         u64                     nr_wakeups_sync;
1169         u64                     nr_wakeups_migrate;
1170         u64                     nr_wakeups_local;
1171         u64                     nr_wakeups_remote;
1172         u64                     nr_wakeups_affine;
1173         u64                     nr_wakeups_affine_attempts;
1174         u64                     nr_wakeups_passive;
1175         u64                     nr_wakeups_idle;
1176 };
1177 #endif
1178 
1179 struct sched_entity {
1180         struct load_weight      load;           /* for load-balancing */
1181         struct rb_node          run_node;
1182         struct list_head        group_node;
1183         unsigned int            on_rq;
1184 
1185         u64                     exec_start;
1186         u64                     sum_exec_runtime;
1187         u64                     vruntime;
1188         u64                     prev_sum_exec_runtime;
1189 
1190         u64                     nr_migrations;
1191 
1192 #ifdef CONFIG_SCHEDSTATS
1193         struct sched_statistics statistics;
1194 #endif
1195 
1196 #ifdef CONFIG_FAIR_GROUP_SCHED
1197         int                     depth;
1198         struct sched_entity     *parent;
1199         /* rq on which this entity is (to be) queued: */
1200         struct cfs_rq           *cfs_rq;
1201         /* rq "owned" by this entity/group: */
1202         struct cfs_rq           *my_q;
1203 #endif
1204 
1205 #ifdef CONFIG_SMP
1206         /* Per-entity load-tracking */
1207         struct sched_avg        avg;
1208 #endif
1209 };
1210 
1211 struct sched_rt_entity {
1212         struct list_head run_list;
1213         unsigned long timeout;
1214         unsigned long watchdog_stamp;
1215         unsigned int time_slice;
1216 
1217         struct sched_rt_entity *back;
1218 #ifdef CONFIG_RT_GROUP_SCHED
1219         struct sched_rt_entity  *parent;
1220         /* rq on which this entity is (to be) queued: */
1221         struct rt_rq            *rt_rq;
1222         /* rq "owned" by this entity/group: */
1223         struct rt_rq            *my_q;
1224 #endif
1225 };
1226 
1227 struct sched_dl_entity {
1228         struct rb_node  rb_node;
1229 
1230         /*
1231          * Original scheduling parameters. Copied here from sched_attr
1232          * during sched_setattr(), they will remain the same until
1233          * the next sched_setattr().
1234          */
1235         u64 dl_runtime;         /* maximum runtime for each instance    */
1236         u64 dl_deadline;        /* relative deadline of each instance   */
1237         u64 dl_period;          /* separation of two instances (period) */
1238         u64 dl_bw;              /* dl_runtime / dl_deadline             */
1239 
1240         /*
1241          * Actual scheduling parameters. Initialized with the values above,
1242          * they are continously updated during task execution. Note that
1243          * the remaining runtime could be < 0 in case we are in overrun.
1244          */
1245         s64 runtime;            /* remaining runtime for this instance  */
1246         u64 deadline;           /* absolute deadline for this instance  */
1247         unsigned int flags;     /* specifying the scheduler behaviour   */
1248 
1249         /*
1250          * Some bool flags:
1251          *
1252          * @dl_throttled tells if we exhausted the runtime. If so, the
1253          * task has to wait for a replenishment to be performed at the
1254          * next firing of dl_timer.
1255          *
1256          * @dl_new tells if a new instance arrived. If so we must
1257          * start executing it with full runtime and reset its absolute
1258          * deadline;
1259          *
1260          * @dl_boosted tells if we are boosted due to DI. If so we are
1261          * outside bandwidth enforcement mechanism (but only until we
1262          * exit the critical section);
1263          *
1264          * @dl_yielded tells if task gave up the cpu before consuming
1265          * all its available runtime during the last job.
1266          */
1267         int dl_throttled, dl_new, dl_boosted, dl_yielded;
1268 
1269         /*
1270          * Bandwidth enforcement timer. Each -deadline task has its
1271          * own bandwidth to be enforced, thus we need one timer per task.
1272          */
1273         struct hrtimer dl_timer;
1274 };
1275 
1276 union rcu_special {
1277         struct {
1278                 bool blocked;
1279                 bool need_qs;
1280         } b;
1281         short s;
1282 };
1283 struct rcu_node;
1284 
1285 enum perf_event_task_context {
1286         perf_invalid_context = -1,
1287         perf_hw_context = 0,
1288         perf_sw_context,
1289         perf_nr_task_contexts,
1290 };
1291 
1292 struct task_struct {
1293         volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
1294         void *stack;
1295         atomic_t usage;
1296         unsigned int flags;     /* per process flags, defined below */
1297         unsigned int ptrace;
1298 
1299 #ifdef CONFIG_SMP
1300         struct llist_node wake_entry;
1301         int on_cpu;
1302         struct task_struct *last_wakee;
1303         unsigned long wakee_flips;
1304         unsigned long wakee_flip_decay_ts;
1305 
1306         int wake_cpu;
1307 #endif
1308         int on_rq;
1309 
1310         int prio, static_prio, normal_prio;
1311         unsigned int rt_priority;
1312         const struct sched_class *sched_class;
1313         struct sched_entity se;
1314         struct sched_rt_entity rt;
1315 #ifdef CONFIG_CGROUP_SCHED
1316         struct task_group *sched_task_group;
1317 #endif
1318         struct sched_dl_entity dl;
1319 
1320 #ifdef CONFIG_PREEMPT_NOTIFIERS
1321         /* list of struct preempt_notifier: */
1322         struct hlist_head preempt_notifiers;
1323 #endif
1324 
1325 #ifdef CONFIG_BLK_DEV_IO_TRACE
1326         unsigned int btrace_seq;
1327 #endif
1328 
1329         unsigned int policy;
1330         int nr_cpus_allowed;
1331         cpumask_t cpus_allowed;
1332 
1333 #ifdef CONFIG_PREEMPT_RCU
1334         int rcu_read_lock_nesting;
1335         union rcu_special rcu_read_unlock_special;
1336         struct list_head rcu_node_entry;
1337 #endif /* #ifdef CONFIG_PREEMPT_RCU */
1338 #ifdef CONFIG_PREEMPT_RCU
1339         struct rcu_node *rcu_blocked_node;
1340 #endif /* #ifdef CONFIG_PREEMPT_RCU */
1341 #ifdef CONFIG_TASKS_RCU
1342         unsigned long rcu_tasks_nvcsw;
1343         bool rcu_tasks_holdout;
1344         struct list_head rcu_tasks_holdout_list;
1345         int rcu_tasks_idle_cpu;
1346 #endif /* #ifdef CONFIG_TASKS_RCU */
1347 
1348 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1349         struct sched_info sched_info;
1350 #endif
1351 
1352         struct list_head tasks;
1353 #ifdef CONFIG_SMP
1354         struct plist_node pushable_tasks;
1355         struct rb_node pushable_dl_tasks;
1356 #endif
1357 
1358         struct mm_struct *mm, *active_mm;
1359 #ifdef CONFIG_COMPAT_BRK
1360         unsigned brk_randomized:1;
1361 #endif
1362         /* per-thread vma caching */
1363         u32 vmacache_seqnum;
1364         struct vm_area_struct *vmacache[VMACACHE_SIZE];
1365 #if defined(SPLIT_RSS_COUNTING)
1366         struct task_rss_stat    rss_stat;
1367 #endif
1368 /* task state */
1369         int exit_state;
1370         int exit_code, exit_signal;
1371         int pdeath_signal;  /*  The signal sent when the parent dies  */
1372         unsigned int jobctl;    /* JOBCTL_*, siglock protected */
1373 
1374         /* Used for emulating ABI behavior of previous Linux versions */
1375         unsigned int personality;
1376 
1377         unsigned in_execve:1;   /* Tell the LSMs that the process is doing an
1378                                  * execve */
1379         unsigned in_iowait:1;
1380 
1381         /* Revert to default priority/policy when forking */
1382         unsigned sched_reset_on_fork:1;
1383         unsigned sched_contributes_to_load:1;
1384 
1385 #ifdef CONFIG_MEMCG_KMEM
1386         unsigned memcg_kmem_skip_account:1;
1387 #endif
1388 
1389         unsigned long atomic_flags; /* Flags needing atomic access. */
1390 
1391         struct restart_block restart_block;
1392 
1393         pid_t pid;
1394         pid_t tgid;
1395 
1396 #ifdef CONFIG_CC_STACKPROTECTOR
1397         /* Canary value for the -fstack-protector gcc feature */
1398         unsigned long stack_canary;
1399 #endif
1400         /*
1401          * pointers to (original) parent process, youngest child, younger sibling,
1402          * older sibling, respectively.  (p->father can be replaced with
1403          * p->real_parent->pid)
1404          */
1405         struct task_struct __rcu *real_parent; /* real parent process */
1406         struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
1407         /*
1408          * children/sibling forms the list of my natural children
1409          */
1410         struct list_head children;      /* list of my children */
1411         struct list_head sibling;       /* linkage in my parent's children list */
1412         struct task_struct *group_leader;       /* threadgroup leader */
1413 
1414         /*
1415          * ptraced is the list of tasks this task is using ptrace on.
1416          * This includes both natural children and PTRACE_ATTACH targets.
1417          * p->ptrace_entry is p's link on the p->parent->ptraced list.
1418          */
1419         struct list_head ptraced;
1420         struct list_head ptrace_entry;
1421 
1422         /* PID/PID hash table linkage. */
1423         struct pid_link pids[PIDTYPE_MAX];
1424         struct list_head thread_group;
1425         struct list_head thread_node;
1426 
1427         struct completion *vfork_done;          /* for vfork() */
1428         int __user *set_child_tid;              /* CLONE_CHILD_SETTID */
1429         int __user *clear_child_tid;            /* CLONE_CHILD_CLEARTID */
1430 
1431         cputime_t utime, stime, utimescaled, stimescaled;
1432         cputime_t gtime;
1433 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1434         struct cputime prev_cputime;
1435 #endif
1436 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1437         seqlock_t vtime_seqlock;
1438         unsigned long long vtime_snap;
1439         enum {
1440                 VTIME_SLEEPING = 0,
1441                 VTIME_USER,
1442                 VTIME_SYS,
1443         } vtime_snap_whence;
1444 #endif
1445         unsigned long nvcsw, nivcsw; /* context switch counts */
1446         u64 start_time;         /* monotonic time in nsec */
1447         u64 real_start_time;    /* boot based time in nsec */
1448 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1449         unsigned long min_flt, maj_flt;
1450 
1451         struct task_cputime cputime_expires;
1452         struct list_head cpu_timers[3];
1453 
1454 /* process credentials */
1455         const struct cred __rcu *real_cred; /* objective and real subjective task
1456                                          * credentials (COW) */
1457         const struct cred __rcu *cred;  /* effective (overridable) subjective task
1458                                          * credentials (COW) */
1459         char comm[TASK_COMM_LEN]; /* executable name excluding path
1460                                      - access with [gs]et_task_comm (which lock
1461                                        it with task_lock())
1462                                      - initialized normally by setup_new_exec */
1463 /* file system info */
1464         int link_count, total_link_count;
1465 #ifdef CONFIG_SYSVIPC
1466 /* ipc stuff */
1467         struct sysv_sem sysvsem;
1468         struct sysv_shm sysvshm;
1469 #endif
1470 #ifdef CONFIG_DETECT_HUNG_TASK
1471 /* hung task detection */
1472         unsigned long last_switch_count;
1473 #endif
1474 /* CPU-specific state of this task */
1475         struct thread_struct thread;
1476 /* filesystem information */
1477         struct fs_struct *fs;
1478 /* open file information */
1479         struct files_struct *files;
1480 /* namespaces */
1481         struct nsproxy *nsproxy;
1482 /* signal handlers */
1483         struct signal_struct *signal;
1484         struct sighand_struct *sighand;
1485 
1486         sigset_t blocked, real_blocked;
1487         sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
1488         struct sigpending pending;
1489 
1490         unsigned long sas_ss_sp;
1491         size_t sas_ss_size;
1492         int (*notifier)(void *priv);
1493         void *notifier_data;
1494         sigset_t *notifier_mask;
1495         struct callback_head *task_works;
1496 
1497         struct audit_context *audit_context;
1498 #ifdef CONFIG_AUDITSYSCALL
1499         kuid_t loginuid;
1500         unsigned int sessionid;
1501 #endif
1502         struct seccomp seccomp;
1503 
1504 /* Thread group tracking */
1505         u32 parent_exec_id;
1506         u32 self_exec_id;
1507 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1508  * mempolicy */
1509         spinlock_t alloc_lock;
1510 
1511         /* Protection of the PI data structures: */
1512         raw_spinlock_t pi_lock;
1513 
1514 #ifdef CONFIG_RT_MUTEXES
1515         /* PI waiters blocked on a rt_mutex held by this task */
1516         struct rb_root pi_waiters;
1517         struct rb_node *pi_waiters_leftmost;
1518         /* Deadlock detection and priority inheritance handling */
1519         struct rt_mutex_waiter *pi_blocked_on;
1520 #endif
1521 
1522 #ifdef CONFIG_DEBUG_MUTEXES
1523         /* mutex deadlock detection */
1524         struct mutex_waiter *blocked_on;
1525 #endif
1526 #ifdef CONFIG_TRACE_IRQFLAGS
1527         unsigned int irq_events;
1528         unsigned long hardirq_enable_ip;
1529         unsigned long hardirq_disable_ip;
1530         unsigned int hardirq_enable_event;
1531         unsigned int hardirq_disable_event;
1532         int hardirqs_enabled;
1533         int hardirq_context;
1534         unsigned long softirq_disable_ip;
1535         unsigned long softirq_enable_ip;
1536         unsigned int softirq_disable_event;
1537         unsigned int softirq_enable_event;
1538         int softirqs_enabled;
1539         int softirq_context;
1540 #endif
1541 #ifdef CONFIG_LOCKDEP
1542 # define MAX_LOCK_DEPTH 48UL
1543         u64 curr_chain_key;
1544         int lockdep_depth;
1545         unsigned int lockdep_recursion;
1546         struct held_lock held_locks[MAX_LOCK_DEPTH];
1547         gfp_t lockdep_reclaim_gfp;
1548 #endif
1549 
1550 /* journalling filesystem info */
1551         void *journal_info;
1552 
1553 /* stacked block device info */
1554         struct bio_list *bio_list;
1555 
1556 #ifdef CONFIG_BLOCK
1557 /* stack plugging */
1558         struct blk_plug *plug;
1559 #endif
1560 
1561 /* VM state */
1562         struct reclaim_state *reclaim_state;
1563 
1564         struct backing_dev_info *backing_dev_info;
1565 
1566         struct io_context *io_context;
1567 
1568         unsigned long ptrace_message;
1569         siginfo_t *last_siginfo; /* For ptrace use.  */
1570         struct task_io_accounting ioac;
1571 #if defined(CONFIG_TASK_XACCT)
1572         u64 acct_rss_mem1;      /* accumulated rss usage */
1573         u64 acct_vm_mem1;       /* accumulated virtual memory usage */
1574         cputime_t acct_timexpd; /* stime + utime since last update */
1575 #endif
1576 #ifdef CONFIG_CPUSETS
1577         nodemask_t mems_allowed;        /* Protected by alloc_lock */
1578         seqcount_t mems_allowed_seq;    /* Seqence no to catch updates */
1579         int cpuset_mem_spread_rotor;
1580         int cpuset_slab_spread_rotor;
1581 #endif
1582 #ifdef CONFIG_CGROUPS
1583         /* Control Group info protected by css_set_lock */
1584         struct css_set __rcu *cgroups;
1585         /* cg_list protected by css_set_lock and tsk->alloc_lock */
1586         struct list_head cg_list;
1587 #endif
1588 #ifdef CONFIG_FUTEX
1589         struct robust_list_head __user *robust_list;
1590 #ifdef CONFIG_COMPAT
1591         struct compat_robust_list_head __user *compat_robust_list;
1592 #endif
1593         struct list_head pi_state_list;
1594         struct futex_pi_state *pi_state_cache;
1595 #endif
1596 #ifdef CONFIG_PERF_EVENTS
1597         struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1598         struct mutex perf_event_mutex;
1599         struct list_head perf_event_list;
1600 #endif
1601 #ifdef CONFIG_DEBUG_PREEMPT
1602         unsigned long preempt_disable_ip;
1603 #endif
1604 #ifdef CONFIG_NUMA
1605         struct mempolicy *mempolicy;    /* Protected by alloc_lock */
1606         short il_next;
1607         short pref_node_fork;
1608 #endif
1609 #ifdef CONFIG_NUMA_BALANCING
1610         int numa_scan_seq;
1611         unsigned int numa_scan_period;
1612         unsigned int numa_scan_period_max;
1613         int numa_preferred_nid;
1614         unsigned long numa_migrate_retry;
1615         u64 node_stamp;                 /* migration stamp  */
1616         u64 last_task_numa_placement;
1617         u64 last_sum_exec_runtime;
1618         struct callback_head numa_work;
1619 
1620         struct list_head numa_entry;
1621         struct numa_group *numa_group;
1622 
1623         /*
1624          * numa_faults is an array split into four regions:
1625          * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1626          * in this precise order.
1627          *
1628          * faults_memory: Exponential decaying average of faults on a per-node
1629          * basis. Scheduling placement decisions are made based on these
1630          * counts. The values remain static for the duration of a PTE scan.
1631          * faults_cpu: Track the nodes the process was running on when a NUMA
1632          * hinting fault was incurred.
1633          * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1634          * during the current scan window. When the scan completes, the counts
1635          * in faults_memory and faults_cpu decay and these values are copied.
1636          */
1637         unsigned long *numa_faults;
1638         unsigned long total_numa_faults;
1639 
1640         /*
1641          * numa_faults_locality tracks if faults recorded during the last
1642          * scan window were remote/local or failed to migrate. The task scan
1643          * period is adapted based on the locality of the faults with different
1644          * weights depending on whether they were shared or private faults
1645          */
1646         unsigned long numa_faults_locality[3];
1647 
1648         unsigned long numa_pages_migrated;
1649 #endif /* CONFIG_NUMA_BALANCING */
1650 
1651         struct rcu_head rcu;
1652 
1653         /*
1654          * cache last used pipe for splice
1655          */
1656         struct pipe_inode_info *splice_pipe;
1657 
1658         struct page_frag task_frag;
1659 
1660 #ifdef  CONFIG_TASK_DELAY_ACCT
1661         struct task_delay_info *delays;
1662 #endif
1663 #ifdef CONFIG_FAULT_INJECTION
1664         int make_it_fail;
1665 #endif
1666         /*
1667          * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1668          * balance_dirty_pages() for some dirty throttling pause
1669          */
1670         int nr_dirtied;
1671         int nr_dirtied_pause;
1672         unsigned long dirty_paused_when; /* start of a write-and-pause period */
1673 
1674 #ifdef CONFIG_LATENCYTOP
1675         int latency_record_count;
1676         struct latency_record latency_record[LT_SAVECOUNT];
1677 #endif
1678         /*
1679          * time slack values; these are used to round up poll() and
1680          * select() etc timeout values. These are in nanoseconds.
1681          */
1682         unsigned long timer_slack_ns;
1683         unsigned long default_timer_slack_ns;
1684 
1685 #ifdef CONFIG_KASAN
1686         unsigned int kasan_depth;
1687 #endif
1688 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1689         /* Index of current stored address in ret_stack */
1690         int curr_ret_stack;
1691         /* Stack of return addresses for return function tracing */
1692         struct ftrace_ret_stack *ret_stack;
1693         /* time stamp for last schedule */
1694         unsigned long long ftrace_timestamp;
1695         /*
1696          * Number of functions that haven't been traced
1697          * because of depth overrun.
1698          */
1699         atomic_t trace_overrun;
1700         /* Pause for the tracing */
1701         atomic_t tracing_graph_pause;
1702 #endif
1703 #ifdef CONFIG_TRACING
1704         /* state flags for use by tracers */
1705         unsigned long trace;
1706         /* bitmask and counter of trace recursion */
1707         unsigned long trace_recursion;
1708 #endif /* CONFIG_TRACING */
1709 #ifdef CONFIG_MEMCG
1710         struct memcg_oom_info {
1711                 struct mem_cgroup *memcg;
1712                 gfp_t gfp_mask;
1713                 int order;
1714                 unsigned int may_oom:1;
1715         } memcg_oom;
1716 #endif
1717 #ifdef CONFIG_UPROBES
1718         struct uprobe_task *utask;
1719 #endif
1720 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1721         unsigned int    sequential_io;
1722         unsigned int    sequential_io_avg;
1723 #endif
1724 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1725         unsigned long   task_state_change;
1726 #endif
1727 };
1728 
1729 /* Future-safe accessor for struct task_struct's cpus_allowed. */
1730 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1731 
1732 #define TNF_MIGRATED    0x01
1733 #define TNF_NO_GROUP    0x02
1734 #define TNF_SHARED      0x04
1735 #define TNF_FAULT_LOCAL 0x08
1736 #define TNF_MIGRATE_FAIL 0x10
1737 
1738 #ifdef CONFIG_NUMA_BALANCING
1739 extern void task_numa_fault(int last_node, int node, int pages, int flags);
1740 extern pid_t task_numa_group_id(struct task_struct *p);
1741 extern void set_numabalancing_state(bool enabled);
1742 extern void task_numa_free(struct task_struct *p);
1743 extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
1744                                         int src_nid, int dst_cpu);
1745 #else
1746 static inline void task_numa_fault(int last_node, int node, int pages,
1747                                    int flags)
1748 {
1749 }
1750 static inline pid_t task_numa_group_id(struct task_struct *p)
1751 {
1752         return 0;
1753 }
1754 static inline void set_numabalancing_state(bool enabled)
1755 {
1756 }
1757 static inline void task_numa_free(struct task_struct *p)
1758 {
1759 }
1760 static inline bool should_numa_migrate_memory(struct task_struct *p,
1761                                 struct page *page, int src_nid, int dst_cpu)
1762 {
1763         return true;
1764 }
1765 #endif
1766 
1767 static inline struct pid *task_pid(struct task_struct *task)
1768 {
1769         return task->pids[PIDTYPE_PID].pid;
1770 }
1771 
1772 static inline struct pid *task_tgid(struct task_struct *task)
1773 {
1774         return task->group_leader->pids[PIDTYPE_PID].pid;
1775 }
1776 
1777 /*
1778  * Without tasklist or rcu lock it is not safe to dereference
1779  * the result of task_pgrp/task_session even if task == current,
1780  * we can race with another thread doing sys_setsid/sys_setpgid.
1781  */
1782 static inline struct pid *task_pgrp(struct task_struct *task)
1783 {
1784         return task->group_leader->pids[PIDTYPE_PGID].pid;
1785 }
1786 
1787 static inline struct pid *task_session(struct task_struct *task)
1788 {
1789         return task->group_leader->pids[PIDTYPE_SID].pid;
1790 }
1791 
1792 struct pid_namespace;
1793 
1794 /*
1795  * the helpers to get the task's different pids as they are seen
1796  * from various namespaces
1797  *
1798  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1799  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1800  *                     current.
1801  * task_xid_nr_ns()  : id seen from the ns specified;
1802  *
1803  * set_task_vxid()   : assigns a virtual id to a task;
1804  *
1805  * see also pid_nr() etc in include/linux/pid.h
1806  */
1807 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1808                         struct pid_namespace *ns);
1809 
1810 static inline pid_t task_pid_nr(struct task_struct *tsk)
1811 {
1812         return tsk->pid;
1813 }
1814 
1815 static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1816                                         struct pid_namespace *ns)
1817 {
1818         return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1819 }
1820 
1821 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1822 {
1823         return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1824 }
1825 
1826 
1827 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1828 {
1829         return tsk->tgid;
1830 }
1831 
1832 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1833 
1834 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1835 {
1836         return pid_vnr(task_tgid(tsk));
1837 }
1838 
1839 
1840 static inline int pid_alive(const struct task_struct *p);
1841 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1842 {
1843         pid_t pid = 0;
1844 
1845         rcu_read_lock();
1846         if (pid_alive(tsk))
1847                 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1848         rcu_read_unlock();
1849 
1850         return pid;
1851 }
1852 
1853 static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1854 {
1855         return task_ppid_nr_ns(tsk, &init_pid_ns);
1856 }
1857 
1858 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1859                                         struct pid_namespace *ns)
1860 {
1861         return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1862 }
1863 
1864 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1865 {
1866         return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1867 }
1868 
1869 
1870 static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1871                                         struct pid_namespace *ns)
1872 {
1873         return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1874 }
1875 
1876 static inline pid_t task_session_vnr(struct task_struct *tsk)
1877 {
1878         return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1879 }
1880 
1881 /* obsolete, do not use */
1882 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1883 {
1884         return task_pgrp_nr_ns(tsk, &init_pid_ns);
1885 }
1886 
1887 /**
1888  * pid_alive - check that a task structure is not stale
1889  * @p: Task structure to be checked.
1890  *
1891  * Test if a process is not yet dead (at most zombie state)
1892  * If pid_alive fails, then pointers within the task structure
1893  * can be stale and must not be dereferenced.
1894  *
1895  * Return: 1 if the process is alive. 0 otherwise.
1896  */
1897 static inline int pid_alive(const struct task_struct *p)
1898 {
1899         return p->pids[PIDTYPE_PID].pid != NULL;
1900 }
1901 
1902 /**
1903  * is_global_init - check if a task structure is init
1904  * @tsk: Task structure to be checked.
1905  *
1906  * Check if a task structure is the first user space task the kernel created.
1907  *
1908  * Return: 1 if the task structure is init. 0 otherwise.
1909  */
1910 static inline int is_global_init(struct task_struct *tsk)
1911 {
1912         return tsk->pid == 1;
1913 }
1914 
1915 extern struct pid *cad_pid;
1916 
1917 extern void free_task(struct task_struct *tsk);
1918 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1919 
1920 extern void __put_task_struct(struct task_struct *t);
1921 
1922 static inline void put_task_struct(struct task_struct *t)
1923 {
1924         if (atomic_dec_and_test(&t->usage))
1925                 __put_task_struct(t);
1926 }
1927 
1928 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1929 extern void task_cputime(struct task_struct *t,
1930                          cputime_t *utime, cputime_t *stime);
1931 extern void task_cputime_scaled(struct task_struct *t,
1932                                 cputime_t *utimescaled, cputime_t *stimescaled);
1933 extern cputime_t task_gtime(struct task_struct *t);
1934 #else
1935 static inline void task_cputime(struct task_struct *t,
1936                                 cputime_t *utime, cputime_t *stime)
1937 {
1938         if (utime)
1939                 *utime = t->utime;
1940         if (stime)
1941                 *stime = t->stime;
1942 }
1943 
1944 static inline void task_cputime_scaled(struct task_struct *t,
1945                                        cputime_t *utimescaled,
1946                                        cputime_t *stimescaled)
1947 {
1948         if (utimescaled)
1949                 *utimescaled = t->utimescaled;
1950         if (stimescaled)
1951                 *stimescaled = t->stimescaled;
1952 }
1953 
1954 static inline cputime_t task_gtime(struct task_struct *t)
1955 {
1956         return t->gtime;
1957 }
1958 #endif
1959 extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1960 extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1961 
1962 /*
1963  * Per process flags
1964  */
1965 #define PF_EXITING      0x00000004      /* getting shut down */
1966 #define PF_EXITPIDONE   0x00000008      /* pi exit done on shut down */
1967 #define PF_VCPU         0x00000010      /* I'm a virtual CPU */
1968 #define PF_WQ_WORKER    0x00000020      /* I'm a workqueue worker */
1969 #define PF_FORKNOEXEC   0x00000040      /* forked but didn't exec */
1970 #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
1971 #define PF_SUPERPRIV    0x00000100      /* used super-user privileges */
1972 #define PF_DUMPCORE     0x00000200      /* dumped core */
1973 #define PF_SIGNALED     0x00000400      /* killed by a signal */
1974 #define PF_MEMALLOC     0x00000800      /* Allocating memory */
1975 #define PF_NPROC_EXCEEDED 0x00001000    /* set_user noticed that RLIMIT_NPROC was exceeded */
1976 #define PF_USED_MATH    0x00002000      /* if unset the fpu must be initialized before use */
1977 #define PF_USED_ASYNC   0x00004000      /* used async_schedule*(), used by module init */
1978 #define PF_NOFREEZE     0x00008000      /* this thread should not be frozen */
1979 #define PF_FROZEN       0x00010000      /* frozen for system suspend */
1980 #define PF_FSTRANS      0x00020000      /* inside a filesystem transaction */
1981 #define PF_KSWAPD       0x00040000      /* I am kswapd */
1982 #define PF_MEMALLOC_NOIO 0x00080000     /* Allocating memory without IO involved */
1983 #define PF_LESS_THROTTLE 0x00100000     /* Throttle me less: I clean memory */
1984 #define PF_KTHREAD      0x00200000      /* I am a kernel thread */
1985 #define PF_RANDOMIZE    0x00400000      /* randomize virtual address space */
1986 #define PF_SWAPWRITE    0x00800000      /* Allowed to write to swap */
1987 #define PF_NO_SETAFFINITY 0x04000000    /* Userland is not allowed to meddle with cpus_allowed */
1988 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1989 #define PF_MUTEX_TESTER 0x20000000      /* Thread belongs to the rt mutex tester */
1990 #define PF_FREEZER_SKIP 0x40000000      /* Freezer should not count it as freezable */
1991 #define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */
1992 
1993 /*
1994  * Only the _current_ task can read/write to tsk->flags, but other
1995  * tasks can access tsk->flags in readonly mode for example
1996  * with tsk_used_math (like during threaded core dumping).
1997  * There is however an exception to this rule during ptrace
1998  * or during fork: the ptracer task is allowed to write to the
1999  * child->flags of its traced child (same goes for fork, the parent
2000  * can write to the child->flags), because we're guaranteed the
2001  * child is not running and in turn not changing child->flags
2002  * at the same time the parent does it.
2003  */
2004 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
2005 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
2006 #define clear_used_math() clear_stopped_child_used_math(current)
2007 #define set_used_math() set_stopped_child_used_math(current)
2008 #define conditional_stopped_child_used_math(condition, child) \
2009         do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
2010 #define conditional_used_math(condition) \
2011         conditional_stopped_child_used_math(condition, current)
2012 #define copy_to_stopped_child_used_math(child) \
2013         do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
2014 /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
2015 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
2016 #define used_math() tsk_used_math(current)
2017 
2018 /* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
2019  * __GFP_FS is also cleared as it implies __GFP_IO.
2020  */
2021 static inline gfp_t memalloc_noio_flags(gfp_t flags)
2022 {
2023         if (unlikely(current->flags & PF_MEMALLOC_NOIO))
2024                 flags &= ~(__GFP_IO | __GFP_FS);
2025         return flags;
2026 }
2027 
2028 static inline unsigned int memalloc_noio_save(void)
2029 {
2030         unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
2031         current->flags |= PF_MEMALLOC_NOIO;
2032         return flags;
2033 }
2034 
2035 static inline void memalloc_noio_restore(unsigned int flags)
2036 {
2037         current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
2038 }
2039 
2040 /* Per-process atomic flags. */
2041 #define PFA_NO_NEW_PRIVS 0      /* May not gain new privileges. */
2042 #define PFA_SPREAD_PAGE  1      /* Spread page cache over cpuset */
2043 #define PFA_SPREAD_SLAB  2      /* Spread some slab caches over cpuset */
2044 
2045 
2046 #define TASK_PFA_TEST(name, func)                                       \
2047         static inline bool task_##func(struct task_struct *p)           \
2048         { return test_bit(PFA_##name, &p->atomic_flags); }
2049 #define TASK_PFA_SET(name, func)                                        \
2050         static inline void task_set_##func(struct task_struct *p)       \
2051         { set_bit(PFA_##name, &p->atomic_flags); }
2052 #define TASK_PFA_CLEAR(name, func)                                      \
2053         static inline void task_clear_##func(struct task_struct *p)     \
2054         { clear_bit(PFA_##name, &p->atomic_flags); }
2055 
2056 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
2057 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
2058 
2059 TASK_PFA_TEST(SPREAD_PAGE, spread_page)
2060 TASK_PFA_SET(SPREAD_PAGE, spread_page)
2061 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
2062 
2063 TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
2064 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
2065 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
2066 
2067 /*
2068  * task->jobctl flags
2069  */
2070 #define JOBCTL_STOP_SIGMASK     0xffff  /* signr of the last group stop */
2071 
2072 #define JOBCTL_STOP_DEQUEUED_BIT 16     /* stop signal dequeued */
2073 #define JOBCTL_STOP_PENDING_BIT 17      /* task should stop for group stop */
2074 #define JOBCTL_STOP_CONSUME_BIT 18      /* consume group stop count */
2075 #define JOBCTL_TRAP_STOP_BIT    19      /* trap for STOP */
2076 #define JOBCTL_TRAP_NOTIFY_BIT  20      /* trap for NOTIFY */
2077 #define JOBCTL_TRAPPING_BIT     21      /* switching to TRACED */
2078 #define JOBCTL_LISTENING_BIT    22      /* ptracer is listening for events */
2079 
2080 #define JOBCTL_STOP_DEQUEUED    (1 << JOBCTL_STOP_DEQUEUED_BIT)
2081 #define JOBCTL_STOP_PENDING     (1 << JOBCTL_STOP_PENDING_BIT)
2082 #define JOBCTL_STOP_CONSUME     (1 << JOBCTL_STOP_CONSUME_BIT)
2083 #define JOBCTL_TRAP_STOP        (1 << JOBCTL_TRAP_STOP_BIT)
2084 #define JOBCTL_TRAP_NOTIFY      (1 << JOBCTL_TRAP_NOTIFY_BIT)
2085 #define JOBCTL_TRAPPING         (1 << JOBCTL_TRAPPING_BIT)
2086 #define JOBCTL_LISTENING        (1 << JOBCTL_LISTENING_BIT)
2087 
2088 #define JOBCTL_TRAP_MASK        (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
2089 #define JOBCTL_PENDING_MASK     (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
2090 
2091 extern bool task_set_jobctl_pending(struct task_struct *task,
2092                                     unsigned int mask);
2093 extern void task_clear_jobctl_trapping(struct task_struct *task);
2094 extern void task_clear_jobctl_pending(struct task_struct *task,
2095                                       unsigned int mask);
2096 
2097 static inline void rcu_copy_process(struct task_struct *p)
2098 {
2099 #ifdef CONFIG_PREEMPT_RCU
2100         p->rcu_read_lock_nesting = 0;
2101         p->rcu_read_unlock_special.s = 0;
2102         p->rcu_blocked_node = NULL;
2103         INIT_LIST_HEAD(&p->rcu_node_entry);
2104 #endif /* #ifdef CONFIG_PREEMPT_RCU */
2105 #ifdef CONFIG_TASKS_RCU
2106         p->rcu_tasks_holdout = false;
2107         INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
2108         p->rcu_tasks_idle_cpu = -1;
2109 #endif /* #ifdef CONFIG_TASKS_RCU */
2110 }
2111 
2112 static inline void tsk_restore_flags(struct task_struct *task,
2113                                 unsigned long orig_flags, unsigned long flags)
2114 {
2115         task->flags &= ~flags;
2116         task->flags |= orig_flags & flags;
2117 }
2118 
2119 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
2120                                      const struct cpumask *trial);
2121 extern int task_can_attach(struct task_struct *p,
2122                            const struct cpumask *cs_cpus_allowed);
2123 #ifdef CONFIG_SMP
2124 extern void do_set_cpus_allowed(struct task_struct *p,
2125                                const struct cpumask *new_mask);
2126 
2127 extern int set_cpus_allowed_ptr(struct task_struct *p,
2128                                 const struct cpumask *new_mask);
2129 #else
2130 static inline void do_set_cpus_allowed(struct task_struct *p,
2131                                       const struct cpumask *new_mask)
2132 {
2133 }
2134 static inline int set_cpus_allowed_ptr(struct task_struct *p,
2135                                        const struct cpumask *new_mask)
2136 {
2137         if (!cpumask_test_cpu(0, new_mask))
2138                 return -EINVAL;
2139         return 0;
2140 }
2141 #endif
2142 
2143 #ifdef CONFIG_NO_HZ_COMMON
2144 void calc_load_enter_idle(void);
2145 void calc_load_exit_idle(void);
2146 #else
2147 static inline void calc_load_enter_idle(void) { }
2148 static inline void calc_load_exit_idle(void) { }
2149 #endif /* CONFIG_NO_HZ_COMMON */
2150 
2151 #ifndef CONFIG_CPUMASK_OFFSTACK
2152 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
2153 {
2154         return set_cpus_allowed_ptr(p, &new_mask);
2155 }
2156 #endif
2157 
2158 /*
2159  * Do not use outside of architecture code which knows its limitations.
2160  *
2161  * sched_clock() has no promise of monotonicity or bounded drift between
2162  * CPUs, use (which you should not) requires disabling IRQs.
2163  *
2164  * Please use one of the three interfaces below.
2165  */
2166 extern unsigned long long notrace sched_clock(void);
2167 /*
2168  * See the comment in kernel/sched/clock.c
2169  */
2170 extern u64 cpu_clock(int cpu);
2171 extern u64 local_clock(void);
2172 extern u64 running_clock(void);
2173 extern u64 sched_clock_cpu(int cpu);
2174 
2175 
2176 extern void sched_clock_init(void);
2177 
2178 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2179 static inline void sched_clock_tick(void)
2180 {
2181 }
2182 
2183 static inline void sched_clock_idle_sleep_event(void)
2184 {
2185 }
2186 
2187 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2188 {
2189 }
2190 #else
2191 /*
2192  * Architectures can set this to 1 if they have specified
2193  * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
2194  * but then during bootup it turns out that sched_clock()
2195  * is reliable after all:
2196  */
2197 extern int sched_clock_stable(void);
2198 extern void set_sched_clock_stable(void);
2199 extern void clear_sched_clock_stable(void);
2200 
2201 extern void sched_clock_tick(void);
2202 extern void sched_clock_idle_sleep_event(void);
2203 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2204 #endif
2205 
2206 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2207 /*
2208  * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
2209  * The reason for this explicit opt-in is not to have perf penalty with
2210  * slow sched_clocks.
2211  */
2212 extern void enable_sched_clock_irqtime(void);
2213 extern void disable_sched_clock_irqtime(void);
2214 #else
2215 static inline void enable_sched_clock_irqtime(void) {}
2216 static inline void disable_sched_clock_irqtime(void) {}
2217 #endif
2218 
2219 extern unsigned long long
2220 task_sched_runtime(struct task_struct *task);
2221 
2222 /* sched_exec is called by processes performing an exec */
2223 #ifdef CONFIG_SMP
2224 extern void sched_exec(void);
2225 #else
2226 #define sched_exec()   {}
2227 #endif
2228 
2229 extern void sched_clock_idle_sleep_event(void);
2230 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2231 
2232 #ifdef CONFIG_HOTPLUG_CPU
2233 extern void idle_task_exit(void);
2234 #else
2235 static inline void idle_task_exit(void) {}
2236 #endif
2237 
2238 #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
2239 extern void wake_up_nohz_cpu(int cpu);
2240 #else
2241 static inline void wake_up_nohz_cpu(int cpu) { }
2242 #endif
2243 
2244 #ifdef CONFIG_NO_HZ_FULL
2245 extern bool sched_can_stop_tick(void);
2246 extern u64 scheduler_tick_max_deferment(void);
2247 #else
2248 static inline bool sched_can_stop_tick(void) { return false; }
2249 #endif
2250 
2251 #ifdef CONFIG_SCHED_AUTOGROUP
2252 extern void sched_autogroup_create_attach(struct task_struct *p);
2253 extern void sched_autogroup_detach(struct task_struct *p);
2254 extern void sched_autogroup_fork(struct signal_struct *sig);
2255 extern void sched_autogroup_exit(struct signal_struct *sig);
2256 #ifdef CONFIG_PROC_FS
2257 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2258 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2259 #endif
2260 #else
2261 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2262 static inline void sched_autogroup_detach(struct task_struct *p) { }
2263 static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2264 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2265 #endif
2266 
2267 extern int yield_to(struct task_struct *p, bool preempt);
2268 extern void set_user_nice(struct task_struct *p, long nice);
2269 extern int task_prio(const struct task_struct *p);
2270 /**
2271  * task_nice - return the nice value of a given task.
2272  * @p: the task in question.
2273  *
2274  * Return: The nice value [ -20 ... 0 ... 19 ].
2275  */
2276 static inline int task_nice(const struct task_struct *p)
2277 {
2278         return PRIO_TO_NICE((p)->static_prio);
2279 }
2280 extern int can_nice(const struct task_struct *p, const int nice);
2281 extern int task_curr(const struct task_struct *p);
2282 extern int idle_cpu(int cpu);
2283 extern int sched_setscheduler(struct task_struct *, int,
2284                               const struct sched_param *);
2285 extern int sched_setscheduler_nocheck(struct task_struct *, int,
2286                                       const struct sched_param *);
2287 extern int sched_setattr(struct task_struct *,
2288                          const struct sched_attr *);
2289 extern struct task_struct *idle_task(int cpu);
2290 /**
2291  * is_idle_task - is the specified task an idle task?
2292  * @p: the task in question.
2293  *
2294  * Return: 1 if @p is an idle task. 0 otherwise.
2295  */
2296 static inline bool is_idle_task(const struct task_struct *p)
2297 {
2298         return p->pid == 0;
2299 }
2300 extern struct task_struct *curr_task(int cpu);
2301 extern void set_curr_task(int cpu, struct task_struct *p);
2302 
2303 void yield(void);
2304 
2305 union thread_union {
2306         struct thread_info thread_info;
2307         unsigned long stack[THREAD_SIZE/sizeof(long)];
2308 };
2309 
2310 #ifndef __HAVE_ARCH_KSTACK_END
2311 static inline int kstack_end(void *addr)
2312 {
2313         /* Reliable end of stack detection:
2314          * Some APM bios versions misalign the stack
2315          */
2316         return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2317 }
2318 #endif
2319 
2320 extern union thread_union init_thread_union;
2321 extern struct task_struct init_task;
2322 
2323 extern struct   mm_struct init_mm;
2324 
2325 extern struct pid_namespace init_pid_ns;
2326 
2327 /*
2328  * find a task by one of its numerical ids
2329  *
2330  * find_task_by_pid_ns():
2331  *      finds a task by its pid in the specified namespace
2332  * find_task_by_vpid():
2333  *      finds a task by its virtual pid
2334  *
2335  * see also find_vpid() etc in include/linux/pid.h
2336  */
2337 
2338 extern struct task_struct *find_task_by_vpid(pid_t nr);
2339 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2340                 struct pid_namespace *ns);
2341 
2342 /* per-UID process charging. */
2343 extern struct user_struct * alloc_uid(kuid_t);
2344 static inline struct user_struct *get_uid(struct user_struct *u)
2345 {
2346         atomic_inc(&u->__count);
2347         return u;
2348 }
2349 extern void free_uid(struct user_struct *);
2350 
2351 #include <asm/current.h>
2352 
2353 extern void xtime_update(unsigned long ticks);
2354 
2355 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2356 extern int wake_up_process(struct task_struct *tsk);
2357 extern void wake_up_new_task(struct task_struct *tsk);
2358 #ifdef CONFIG_SMP
2359  extern void kick_process(struct task_struct *tsk);
2360 #else
2361  static inline void kick_process(struct task_struct *tsk) { }
2362 #endif
2363 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2364 extern void sched_dead(struct task_struct *p);
2365 
2366 extern void proc_caches_init(void);
2367 extern void flush_signals(struct task_struct *);
2368 extern void __flush_signals(struct task_struct *);
2369 extern void ignore_signals(struct task_struct *);
2370 extern void flush_signal_handlers(struct task_struct *, int force_default);
2371 extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2372 
2373 static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2374 {
2375         unsigned long flags;
2376         int ret;
2377 
2378         spin_lock_irqsave(&tsk->sighand->siglock, flags);
2379         ret = dequeue_signal(tsk, mask, info);
2380         spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2381 
2382         return ret;
2383 }
2384 
2385 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2386                               sigset_t *mask);
2387 extern void unblock_all_signals(void);
2388 extern void release_task(struct task_struct * p);
2389 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2390 extern int force_sigsegv(int, struct task_struct *);
2391 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2392 extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2393 extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2394 extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2395                                 const struct cred *, u32);
2396 extern int kill_pgrp(struct pid *pid, int sig, int priv);
2397 extern int kill_pid(struct pid *pid, int sig, int priv);
2398 extern int kill_proc_info(int, struct siginfo *, pid_t);
2399 extern __must_check bool do_notify_parent(struct task_struct *, int);
2400 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2401 extern void force_sig(int, struct task_struct *);
2402 extern int send_sig(int, struct task_struct *, int);
2403 extern int zap_other_threads(struct task_struct *p);
2404 extern struct sigqueue *sigqueue_alloc(void);
2405 extern void sigqueue_free(struct sigqueue *);
2406 extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
2407 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2408 
2409 static inline void restore_saved_sigmask(void)
2410 {
2411         if (test_and_clear_restore_sigmask())
2412                 __set_current_blocked(&current->saved_sigmask);
2413 }
2414 
2415 static inline sigset_t *sigmask_to_save(void)
2416 {
2417         sigset_t *res = &current->blocked;
2418         if (unlikely(test_restore_sigmask()))
2419                 res = &current->saved_sigmask;
2420         return res;
2421 }
2422 
2423 static inline int kill_cad_pid(int sig, int priv)
2424 {
2425         return kill_pid(cad_pid, sig, priv);
2426 }
2427 
2428 /* These can be the second arg to send_sig_info/send_group_sig_info.  */
2429 #define SEND_SIG_NOINFO ((struct siginfo *) 0)
2430 #define SEND_SIG_PRIV   ((struct siginfo *) 1)
2431 #define SEND_SIG_FORCED ((struct siginfo *) 2)
2432 
2433 /*
2434  * True if we are on the alternate signal stack.
2435  */
2436 static inline int on_sig_stack(unsigned long sp)
2437 {
2438 #ifdef CONFIG_STACK_GROWSUP
2439         return sp >= current->sas_ss_sp &&
2440                 sp - current->sas_ss_sp < current->sas_ss_size;
2441 #else
2442         return sp > current->sas_ss_sp &&
2443                 sp - current->sas_ss_sp <= current->sas_ss_size;
2444 #endif
2445 }
2446 
2447 static inline int sas_ss_flags(unsigned long sp)
2448 {
2449         if (!current->sas_ss_size)
2450                 return SS_DISABLE;
2451 
2452         return on_sig_stack(sp) ? SS_ONSTACK : 0;
2453 }
2454 
2455 static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2456 {
2457         if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2458 #ifdef CONFIG_STACK_GROWSUP
2459                 return current->sas_ss_sp;
2460 #else
2461                 return current->sas_ss_sp + current->sas_ss_size;
2462 #endif
2463         return sp;
2464 }
2465 
2466 /*
2467  * Routines for handling mm_structs
2468  */
2469 extern struct mm_struct * mm_alloc(void);
2470 
2471 /* mmdrop drops the mm and the page tables */
2472 extern void __mmdrop(struct mm_struct *);
2473 static inline void mmdrop(struct mm_struct * mm)
2474 {
2475         if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2476                 __mmdrop(mm);
2477 }
2478 
2479 /* mmput gets rid of the mappings and all user-space */
2480 extern void mmput(struct mm_struct *);
2481 /* Grab a reference to a task's mm, if it is not already going away */
2482 extern struct mm_struct *get_task_mm(struct task_struct *task);
2483 /*
2484  * Grab a reference to a task's mm, if it is not already going away
2485  * and ptrace_may_access with the mode parameter passed to it
2486  * succeeds.
2487  */
2488 extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2489 /* Remove the current tasks stale references to the old mm_struct */
2490 extern void mm_release(struct task_struct *, struct mm_struct *);
2491 
2492 extern int copy_thread(unsigned long, unsigned long, unsigned long,
2493                         struct task_struct *);
2494 extern void flush_thread(void);
2495 extern void exit_thread(void);
2496 
2497 extern void exit_files(struct task_struct *);
2498 extern void __cleanup_sighand(struct sighand_struct *);
2499 
2500 extern void exit_itimers(struct signal_struct *);
2501 extern void flush_itimer_signals(void);
2502 
2503 extern void do_group_exit(int);
2504 
2505 extern int do_execve(struct filename *,
2506                      const char __user * const __user *,
2507                      const char __user * const __user *);
2508 extern int do_execveat(int, struct filename *,
2509                        const char __user * const __user *,
2510                        const char __user * const __user *,
2511                        int);
2512 extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2513 struct task_struct *fork_idle(int);
2514 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
2515 
2516 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
2517 static inline void set_task_comm(struct task_struct *tsk, const char *from)
2518 {
2519         __set_task_comm(tsk, from, false);
2520 }
2521 extern char *get_task_comm(char *to, struct task_struct *tsk);
2522 
2523 #ifdef CONFIG_SMP
2524 void scheduler_ipi(void);
2525 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2526 #else
2527 static inline void scheduler_ipi(void) { }
2528 static inline unsigned long wait_task_inactive(struct task_struct *p,
2529                                                long match_state)
2530 {
2531         return 1;
2532 }
2533 #endif
2534 
2535 #define next_task(p) \
2536         list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2537 
2538 #define for_each_process(p) \
2539         for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2540 
2541 extern bool current_is_single_threaded(void);
2542 
2543 /*
2544  * Careful: do_each_thread/while_each_thread is a double loop so
2545  *          'break' will not work as expected - use goto instead.
2546  */
2547 #define do_each_thread(g, t) \
2548         for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2549 
2550 #define while_each_thread(g, t) \
2551         while ((t = next_thread(t)) != g)
2552 
2553 #define __for_each_thread(signal, t)    \
2554         list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
2555 
2556 #define for_each_thread(p, t)           \
2557         __for_each_thread((p)->signal, t)
2558 
2559 /* Careful: this is a double loop, 'break' won't work as expected. */
2560 #define for_each_process_thread(p, t)   \
2561         for_each_process(p) for_each_thread(p, t)
2562 
2563 static inline int get_nr_threads(struct task_struct *tsk)
2564 {
2565         return tsk->signal->nr_threads;
2566 }
2567 
2568 static inline bool thread_group_leader(struct task_struct *p)
2569 {
2570         return p->exit_signal >= 0;
2571 }
2572 
2573 /* Do to the insanities of de_thread it is possible for a process
2574  * to have the pid of the thread group leader without actually being
2575  * the thread group leader.  For iteration through the pids in proc
2576  * all we care about is that we have a task with the appropriate
2577  * pid, we don't actually care if we have the right task.
2578  */
2579 static inline bool has_group_leader_pid(struct task_struct *p)
2580 {
2581         return task_pid(p) == p->signal->leader_pid;
2582 }
2583 
2584 static inline
2585 bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
2586 {
2587         return p1->signal == p2->signal;
2588 }
2589 
2590 static inline struct task_struct *next_thread(const struct task_struct *p)
2591 {
2592         return list_entry_rcu(p->thread_group.next,
2593                               struct task_struct, thread_group);
2594 }
2595 
2596 static inline int thread_group_empty(struct task_struct *p)
2597 {
2598         return list_empty(&p->thread_group);
2599 }
2600 
2601 #define delay_group_leader(p) \
2602                 (thread_group_leader(p) && !thread_group_empty(p))
2603 
2604 /*
2605  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2606  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2607  * pins the final release of task.io_context.  Also protects ->cpuset and
2608  * ->cgroup.subsys[]. And ->vfork_done.
2609  *
2610  * Nests both inside and outside of read_lock(&tasklist_lock).
2611  * It must not be nested with write_lock_irq(&tasklist_lock),
2612  * neither inside nor outside.
2613  */
2614 static inline void task_lock(struct task_struct *p)
2615 {
2616         spin_lock(&p->alloc_lock);
2617 }
2618 
2619 static inline void task_unlock(struct task_struct *p)
2620 {
2621         spin_unlock(&p->alloc_lock);
2622 }
2623 
2624 extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2625                                                         unsigned long *flags);
2626 
2627 static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
2628                                                        unsigned long *flags)
2629 {
2630         struct sighand_struct *ret;
2631 
2632         ret = __lock_task_sighand(tsk, flags);
2633         (void)__cond_lock(&tsk->sighand->siglock, ret);
2634         return ret;
2635 }
2636 
2637 static inline void unlock_task_sighand(struct task_struct *tsk,
2638                                                 unsigned long *flags)
2639 {
2640         spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2641 }
2642 
2643 #ifdef CONFIG_CGROUPS
2644 static inline void threadgroup_change_begin(struct task_struct *tsk)
2645 {
2646         down_read(&tsk->signal->group_rwsem);
2647 }
2648 static inline void threadgroup_change_end(struct task_struct *tsk)
2649 {
2650         up_read(&tsk->signal->group_rwsem);
2651 }
2652 
2653 /**
2654  * threadgroup_lock - lock threadgroup
2655  * @tsk: member task of the threadgroup to lock
2656  *
2657  * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
2658  * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
2659  * change ->group_leader/pid.  This is useful for cases where the threadgroup
2660  * needs to stay stable across blockable operations.
2661  *
2662  * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
2663  * synchronization.  While held, no new task will be added to threadgroup
2664  * and no existing live task will have its PF_EXITING set.
2665  *
2666  * de_thread() does threadgroup_change_{begin|end}() when a non-leader
2667  * sub-thread becomes a new leader.
2668  */
2669 static inline void threadgroup_lock(struct task_struct *tsk)
2670 {
2671         down_write(&tsk->signal->group_rwsem);
2672 }
2673 
2674 /**
2675  * threadgroup_unlock - unlock threadgroup
2676  * @tsk: member task of the threadgroup to unlock
2677  *
2678  * Reverse threadgroup_lock().
2679  */
2680 static inline void threadgroup_unlock(struct task_struct *tsk)
2681 {
2682         up_write(&tsk->signal->group_rwsem);
2683 }
2684 #else
2685 static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2686 static inline void threadgroup_change_end(struct task_struct *tsk) {}
2687 static inline void threadgroup_lock(struct task_struct *tsk) {}
2688 static inline void threadgroup_unlock(struct task_struct *tsk) {}
2689 #endif
2690 
2691 #ifndef __HAVE_THREAD_FUNCTIONS
2692 
2693 #define task_thread_info(task)  ((struct thread_info *)(task)->stack)
2694 #define task_stack_page(task)   ((task)->stack)
2695 
2696 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2697 {
2698         *task_thread_info(p) = *task_thread_info(org);
2699         task_thread_info(p)->task = p;
2700 }
2701 
2702 /*
2703  * Return the address of the last usable long on the stack.
2704  *
2705  * When the stack grows down, this is just above the thread
2706  * info struct. Going any lower will corrupt the threadinfo.
2707  *
2708  * When the stack grows up, this is the highest address.
2709  * Beyond that position, we corrupt data on the next page.
2710  */
2711 static inline unsigned long *end_of_stack(struct task_struct *p)
2712 {
2713 #ifdef CONFIG_STACK_GROWSUP
2714         return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
2715 #else
2716         return (unsigned long *)(task_thread_info(p) + 1);
2717 #endif
2718 }
2719 
2720 #endif
2721 #define task_stack_end_corrupted(task) \
2722                 (*(end_of_stack(task)) != STACK_END_MAGIC)
2723 
2724 static inline int object_is_on_stack(void *obj)
2725 {
2726         void *stack = task_stack_page(current);
2727 
2728         return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2729 }
2730 
2731 extern void thread_info_cache_init(void);
2732 
2733 #ifdef CONFIG_DEBUG_STACK_USAGE
2734 static inline unsigned long stack_not_used(struct task_struct *p)
2735 {
2736         unsigned long *n = end_of_stack(p);
2737 
2738         do {    /* Skip over canary */
2739                 n++;
2740         } while (!*n);
2741 
2742         return (unsigned long)n - (unsigned long)end_of_stack(p);
2743 }
2744 #endif
2745 extern void set_task_stack_end_magic(struct task_struct *tsk);
2746 
2747 /* set thread flags in other task's structures
2748  * - see asm/thread_info.h for TIF_xxxx flags available
2749  */
2750 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2751 {
2752         set_ti_thread_flag(task_thread_info(tsk), flag);
2753 }
2754 
2755 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2756 {
2757         clear_ti_thread_flag(task_thread_info(tsk), flag);
2758 }
2759 
2760 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2761 {
2762         return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2763 }
2764 
2765 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2766 {
2767         return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2768 }
2769 
2770 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2771 {
2772         return test_ti_thread_flag(task_thread_info(tsk), flag);
2773 }
2774 
2775 static inline void set_tsk_need_resched(struct task_struct *tsk)
2776 {
2777         set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2778 }
2779 
2780 static inline void clear_tsk_need_resched(struct task_struct *tsk)
2781 {
2782         clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2783 }
2784 
2785 static inline int test_tsk_need_resched(struct task_struct *tsk)
2786 {
2787         return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2788 }
2789 
2790 static inline int restart_syscall(void)
2791 {
2792         set_tsk_thread_flag(current, TIF_SIGPENDING);
2793         return -ERESTARTNOINTR;
2794 }
2795 
2796 static inline int signal_pending(struct task_struct *p)
2797 {
2798         return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2799 }
2800 
2801 static inline int __fatal_signal_pending(struct task_struct *p)
2802 {
2803         return unlikely(sigismember(&p->pending.signal, SIGKILL));
2804 }
2805 
2806 static inline int fatal_signal_pending(struct task_struct *p)
2807 {
2808         return signal_pending(p) && __fatal_signal_pending(p);
2809 }
2810 
2811 static inline int signal_pending_state(long state, struct task_struct *p)
2812 {
2813         if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2814                 return 0;
2815         if (!signal_pending(p))
2816                 return 0;
2817 
2818         return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2819 }
2820 
2821 /*
2822  * cond_resched() and cond_resched_lock(): latency reduction via
2823  * explicit rescheduling in places that are safe. The return
2824  * value indicates whether a reschedule was done in fact.
2825  * cond_resched_lock() will drop the spinlock before scheduling,
2826  * cond_resched_softirq() will enable bhs before scheduling.
2827  */
2828 extern int _cond_resched(void);
2829 
2830 #define cond_resched() ({                       \
2831         ___might_sleep(__FILE__, __LINE__, 0);  \
2832         _cond_resched();                        \
2833 })
2834 
2835 extern int __cond_resched_lock(spinlock_t *lock);
2836 
2837 #ifdef CONFIG_PREEMPT_COUNT
2838 #define PREEMPT_LOCK_OFFSET     PREEMPT_OFFSET
2839 #else
2840 #define PREEMPT_LOCK_OFFSET     0
2841 #endif
2842 
2843 #define cond_resched_lock(lock) ({                              \
2844         ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
2845         __cond_resched_lock(lock);                              \
2846 })
2847 
2848 extern int __cond_resched_softirq(void);
2849 
2850 #define cond_resched_softirq() ({                                       \
2851         ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);     \
2852         __cond_resched_softirq();                                       \
2853 })
2854 
2855 static inline void cond_resched_rcu(void)
2856 {
2857 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2858         rcu_read_unlock();
2859         cond_resched();
2860         rcu_read_lock();
2861 #endif
2862 }
2863 
2864 /*
2865  * Does a critical section need to be broken due to another
2866  * task waiting?: (technically does not depend on CONFIG_PREEMPT,
2867  * but a general need for low latency)
2868  */
2869 static inline int spin_needbreak(spinlock_t *lock)
2870 {
2871 #ifdef CONFIG_PREEMPT
2872         return spin_is_contended(lock);
2873 #else
2874         return 0;
2875 #endif
2876 }
2877 
2878 /*
2879  * Idle thread specific functions to determine the need_resched
2880  * polling state.
2881  */
2882 #ifdef TIF_POLLING_NRFLAG
2883 static inline int tsk_is_polling(struct task_struct *p)
2884 {
2885         return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
2886 }
2887 
2888 static inline void __current_set_polling(void)
2889 {
2890         set_thread_flag(TIF_POLLING_NRFLAG);
2891 }
2892 
2893 static inline bool __must_check current_set_polling_and_test(void)
2894 {
2895         __current_set_polling();
2896 
2897         /*
2898          * Polling state must be visible before we test NEED_RESCHED,
2899          * paired by resched_curr()
2900          */
2901         smp_mb__after_atomic();
2902 
2903         return unlikely(tif_need_resched());
2904 }
2905 
2906 static inline void __current_clr_polling(void)
2907 {
2908         clear_thread_flag(TIF_POLLING_NRFLAG);
2909 }
2910 
2911 static inline bool __must_check current_clr_polling_and_test(void)
2912 {
2913         __current_clr_polling();
2914 
2915         /*
2916          * Polling state must be visible before we test NEED_RESCHED,
2917          * paired by resched_curr()
2918          */
2919         smp_mb__after_atomic();
2920 
2921         return unlikely(tif_need_resched());
2922 }
2923 
2924 #else
2925 static inline int tsk_is_polling(struct task_struct *p) { return 0; }
2926 static inline void __current_set_polling(void) { }
2927 static inline void __current_clr_polling(void) { }
2928 
2929 static inline bool __must_check current_set_polling_and_test(void)
2930 {
2931         return unlikely(tif_need_resched());
2932 }
2933 static inline bool __must_check current_clr_polling_and_test(void)
2934 {
2935         return unlikely(tif_need_resched());
2936 }
2937 #endif
2938 
2939 static inline void current_clr_polling(void)
2940 {
2941         __current_clr_polling();
2942 
2943         /*
2944          * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
2945          * Once the bit is cleared, we'll get IPIs with every new
2946          * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
2947          * fold.
2948          */
2949         smp_mb(); /* paired with resched_curr() */
2950 
2951         preempt_fold_need_resched();
2952 }
2953 
2954 static __always_inline bool need_resched(void)
2955 {
2956         return unlikely(tif_need_resched());
2957 }
2958 
2959 /*
2960  * Thread group CPU time accounting.
2961  */
2962 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2963 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2964 
2965 static inline void thread_group_cputime_init(struct signal_struct *sig)
2966 {
2967         raw_spin_lock_init(&sig->cputimer.lock);
2968 }
2969 
2970 /*
2971  * Reevaluate whether the task has signals pending delivery.
2972  * Wake the task if so.
2973  * This is required every time the blocked sigset_t changes.
2974  * callers must hold sighand->siglock.
2975  */
2976 extern void recalc_sigpending_and_wake(struct task_struct *t);
2977 extern void recalc_sigpending(void);
2978 
2979 extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
2980 
2981 static inline void signal_wake_up(struct task_struct *t, bool resume)
2982 {
2983         signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2984 }
2985 static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
2986 {
2987         signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2988 }
2989 
2990 /*
2991  * Wrappers for p->thread_info->cpu access. No-op on UP.
2992  */
2993 #ifdef CONFIG_SMP
2994 
2995 static inline unsigned int task_cpu(const struct task_struct *p)
2996 {
2997         return task_thread_info(p)->cpu;
2998 }
2999 
3000 static inline int task_node(const struct task_struct *p)
3001 {
3002         return cpu_to_node(task_cpu(p));
3003 }
3004 
3005 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
3006 
3007 #else
3008 
3009 static inline unsigned int task_cpu(const struct task_struct *p)
3010 {
3011         return 0;
3012 }
3013 
3014 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
3015 {
3016 }
3017 
3018 #endif /* CONFIG_SMP */
3019 
3020 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
3021 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
3022 
3023 #ifdef CONFIG_CGROUP_SCHED
3024 extern struct task_group root_task_group;
3025 #endif /* CONFIG_CGROUP_SCHED */
3026 
3027 extern int task_can_switch_user(struct user_struct *up,
3028                                         struct task_struct *tsk);
3029 
3030 #ifdef CONFIG_TASK_XACCT
3031 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3032 {
3033         tsk->ioac.rchar += amt;
3034 }
3035 
3036 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3037 {
3038         tsk->ioac.wchar += amt;
3039 }
3040 
3041 static inline void inc_syscr(struct task_struct *tsk)
3042 {
3043         tsk->ioac.syscr++;
3044 }
3045 
3046 static inline void inc_syscw(struct task_struct *tsk)
3047 {
3048         tsk->ioac.syscw++;
3049 }
3050 #else
3051 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3052 {
3053 }
3054 
3055 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3056 {
3057 }
3058 
3059 static inline void inc_syscr(struct task_struct *tsk)
3060 {
3061 }
3062 
3063 static inline void inc_syscw(struct task_struct *tsk)
3064 {
3065 }
3066 #endif
3067 
3068 #ifndef TASK_SIZE_OF
3069 #define TASK_SIZE_OF(tsk)       TASK_SIZE
3070 #endif
3071 
3072 #ifdef CONFIG_MEMCG
3073 extern void mm_update_next_owner(struct mm_struct *mm);
3074 #else
3075 static inline void mm_update_next_owner(struct mm_struct *mm)
3076 {
3077 }
3078 #endif /* CONFIG_MEMCG */
3079 
3080 static inline unsigned long task_rlimit(const struct task_struct *tsk,
3081                 unsigned int limit)
3082 {
3083         return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
3084 }
3085 
3086 static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
3087                 unsigned int limit)
3088 {
3089         return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
3090 }
3091 
3092 static inline unsigned long rlimit(unsigned int limit)
3093 {
3094         return task_rlimit(current, limit);
3095 }
3096 
3097 static inline unsigned long rlimit_max(unsigned int limit)
3098 {
3099         return task_rlimit_max(current, limit);
3100 }
3101 
3102 #endif
3103 

This page was automatically generated by LXR 0.3.1 (source).  •  Linux is a registered trademark of Linus Torvalds  •  Contact us