
Linux/include/linux/sched.h

#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <asm/param.h>  /* for HZ */

extern unsigned long global_event;

#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/tasks.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/timex.h>

#include <asm/system.h>
#include <asm/semaphore.h>
#include <asm/page.h>

#include <linux/smp.h>
#include <linux/tty.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/securebits.h>

/*
 * cloning flags:
 */
#define CSIGNAL         0x000000ff      /* signal mask to be sent at exit */
#define CLONE_VM        0x00000100      /* set if VM shared between processes */
#define CLONE_FS        0x00000200      /* set if fs info shared between processes */
#define CLONE_FILES     0x00000400      /* set if open files shared between processes */
#define CLONE_SIGHAND   0x00000800      /* set if signal handlers shared */
#define CLONE_PID       0x00001000      /* set if pid shared */
#define CLONE_PTRACE    0x00002000      /* set if we want to let tracing continue on the child too */
#define CLONE_VFORK     0x00004000      /* set if the parent wants the child to wake it up on mm_release */
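
/*
 * Illustrative use of these flags (hypothetical, userspace): with the
 * glibc clone() wrapper, a child that shares VM, fs info, open files
 * and signal handlers behaves much like a thread.  "thread_main",
 * "stack" and "arg" are made-up names; error handling omitted:
 *
 *      #include <sched.h>
 *
 *      pid = clone(thread_main, stack + STACK_SIZE,
 *                  CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
 *                  arg);
 */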

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];         /* Load averages */

#define FSHIFT          11              /* nr of bits of precision */
#define FIXED_1         (1<<FSHIFT)     /* 1.0 as fixed-point */
#define LOAD_FREQ       (5*HZ)          /* 5 sec intervals */
#define EXP_1           1884            /* 1/exp(5sec/1min) as fixed-point */
#define EXP_5           2014            /* 1/exp(5sec/5min) */
#define EXP_15          2037            /* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
        load *= exp; \
        load += n*(FIXED_1-exp); \
        load >>= FSHIFT;
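
/*
 * A minimal sketch of how the pieces above combine (modelled on
 * calc_load() in kernel/timer.c): every LOAD_FREQ ticks each
 * avenrun[] entry decays towards the current number of active
 * tasks, scaled to fixed point.  "active" is assumed to already be
 * the active-task count multiplied by FIXED_1; the helper name is
 * illustrative:
 */
static inline void example_update_avenrun(unsigned long active)
{
        CALC_LOAD(avenrun[0], EXP_1, active);   /* 1 min */
        CALC_LOAD(avenrun[1], EXP_5, active);   /* 5 min */
        CALC_LOAD(avenrun[2], EXP_15, active);  /* 15 min */
}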

#define CT_TO_SECS(x)   ((x) / HZ)
#define CT_TO_USECS(x)  (((x) % HZ) * 1000000/HZ)

extern int nr_running, nr_tasks;
extern int last_pid;

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>

#include <asm/processor.h>

#define TASK_RUNNING            0
#define TASK_INTERRUPTIBLE      1
#define TASK_UNINTERRUPTIBLE    2
#define TASK_ZOMBIE             4
#define TASK_STOPPED            8
#define TASK_SWAPPING           16
#define TASK_EXCLUSIVE          32

/*
 * Scheduling policies
 */
#define SCHED_OTHER             0
#define SCHED_FIFO              1
#define SCHED_RR                2

/*
 * This is an additional bit set when we want to
 * yield the CPU for one re-schedule..
 */
#define SCHED_YIELD             0x10

struct sched_param {
        int sched_priority;
};
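
/*
 * Userspace counterpart (illustrative): sched_setscheduler(2) takes
 * one of the policies above plus a struct sched_param.  Sketch only,
 * error handling omitted; pid 0 means the calling process:
 *
 *      struct sched_param param;
 *
 *      param.sched_priority = 50;
 *      sched_setscheduler(0, SCHED_FIFO, &param);
 */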

#ifdef __KERNEL__

#include <asm/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t runqueue_lock;

extern void sched_init(void);
extern void init_idle(void);
extern void show_state(void);
extern void trap_init(void);

#define MAX_SCHEDULE_TIMEOUT    LONG_MAX
extern signed long FASTCALL(schedule_timeout(signed long timeout));
asmlinkage void schedule(void);

extern int schedule_task(struct tq_struct *task);
extern void flush_scheduled_tasks(void);
extern int start_context_thread(void);
extern int current_is_keventd(void);
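
/*
 * The canonical schedule_timeout() pattern, as a sketch: mark the
 * task as sleeping first, then call schedule_timeout(), which
 * returns the number of ticks remaining if the task was woken
 * early.  Illustration only:
 *
 *      current->state = TASK_INTERRUPTIBLE;
 *      remaining = schedule_timeout(HZ);       /* sleep ~1 second */
 */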

/*
 * The default fd array needs to be at least BITS_PER_LONG,
 * as this is the granularity returned by copy_fdset().
 */
#define NR_OPEN_DEFAULT BITS_PER_LONG

/*
 * Open file table structure
 */
struct files_struct {
        atomic_t count;
        int max_fds;
        int max_fdset;
        int next_fd;
        struct file ** fd;      /* current fd array */
        fd_set *close_on_exec;
        fd_set *open_fds;
        fd_set close_on_exec_init;
        fd_set open_fds_init;
        struct file * fd_array[NR_OPEN_DEFAULT];
};
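
/*
 * A sketch of fd-to-file lookup against this structure (modelled on
 * the fcheck()/fget() pattern in the VFS; the helper name here is
 * made up).  Real callers also serialize against expand_files(),
 * since the fd array can be reallocated as it grows:
 */
static inline struct file * example_fcheck(struct files_struct *files, int fd)
{
        if (fd >= 0 && fd < files->max_fds)
                return files->fd[fd];
        return NULL;
}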

#define INIT_FILES { \
        ATOMIC_INIT(1), \
        NR_OPEN_DEFAULT, \
        __FD_SETSIZE, \
        0, \
        &init_files.fd_array[0], \
        &init_files.close_on_exec_init, \
        &init_files.open_fds_init, \
        { { 0, } }, \
        { { 0, } }, \
        { NULL, } \
}

struct fs_struct {
        atomic_t count;
        int umask;
        struct dentry * root, * pwd;
};

#define INIT_FS { \
        ATOMIC_INIT(1), \
        0022, \
        NULL, NULL \
}

/* Maximum number of active map areas.. This is a random (large) number */
#define MAX_MAP_COUNT   (65536)

/* Number of map areas at which the AVL tree is activated. This is arbitrary. */
#define AVL_MIN_MAP_COUNT       32

struct mm_struct {
        struct vm_area_struct *mmap;            /* list of VMAs */
        struct vm_area_struct *mmap_avl;        /* tree of VMAs */
        struct vm_area_struct *mmap_cache;      /* last find_vma result */
        pgd_t * pgd;
        atomic_t count;
        int map_count;                          /* number of VMAs */
        struct semaphore mmap_sem;
#ifdef __alpha__
        unsigned long context[NR_CPUS];
#else
        unsigned long context;
#endif
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long start_brk, brk, start_stack;
        unsigned long arg_start, arg_end, env_start, env_end;
        unsigned long rss, total_vm, locked_vm;
        unsigned long def_flags;
        unsigned long cpu_vm_mask;
        unsigned long swap_cnt; /* number of pages to swap on next pass */
        unsigned long swap_address;
        /*
         * This is an architecture-specific pointer: the portable
         * part of Linux does not know about any segments.
         */
        void * segments;
};

#ifdef __alpha__
#define CONTEXT_INIT    { 0, }
#else
#define CONTEXT_INIT    0
#endif

#define INIT_MM {                                       \
                &init_mmap, NULL, NULL,                 \
                swapper_pg_dir,                         \
                ATOMIC_INIT(1), 1,                      \
                MUTEX,                                  \
                CONTEXT_INIT,                           \
                0, 0, 0, 0,                             \
                0, 0, 0,                                \
                0, 0, 0, 0,                             \
                0, 0, 0,                                \
                0, 0, 0, 0, NULL }

struct signal_struct {
        atomic_t                count;
        struct k_sigaction      action[_NSIG];
        spinlock_t              siglock;
};


#define INIT_SIGNALS { \
                ATOMIC_INIT(1), \
                { {{0,}}, }, \
                SPIN_LOCK_UNLOCKED }

/*
 * Some day this will be a full-fledged user tracking system..
 * Right now it is only used to track how many processes a
 * user has, but it has the potential to track memory usage etc.
 */
struct user_struct;
struct task_struct {
/* these are hardcoded - don't touch */
        volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
        unsigned long flags;    /* per process flags, defined below */
        int sigpending;
        mm_segment_t addr_limit;        /* thread address space:
                                                0-0xBFFFFFFF for user threads
                                                0-0xFFFFFFFF for kernel threads
                                         */
        struct exec_domain *exec_domain;
        long need_resched;
        unsigned long ptrace;

/* various fields */
        long counter;
        long priority;
        cycles_t avg_slice;
/* SMP and runqueue state */
        int has_cpu;
        int processor;
        int last_processor;
        int lock_depth;         /* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
        struct task_struct *next_task, *prev_task;
        struct task_struct *next_run,  *prev_run;

        unsigned int task_exclusive;    /* task wants wake-one semantics in __wake_up() */
/* task state */
        struct linux_binfmt *binfmt;
        int exit_code, exit_signal;
        int pdeath_signal;  /*  The signal sent when the parent dies  */
        /* ??? */
        unsigned long personality;
        unsigned int dumpable:1;
        int did_exec:1;
        pid_t pid;
        pid_t pgrp;
        pid_t tty_old_pgrp;
        pid_t session;
        /* boolean value for session group leader */
        int leader;
        /*
         * pointers to (original) parent process, youngest child, younger sibling,
         * older sibling, respectively.  (p->father can be replaced with
         * p->p_pptr->pid)
         */
        struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;

        /* PID hash table linkage. */
        struct task_struct *pidhash_next;
        struct task_struct **pidhash_pprev;

        /* Pointer to task[] array linkage. */
        struct task_struct **tarray_ptr;

        struct wait_queue *wait_chldexit;       /* for wait4() */
        struct semaphore *vfork_sem;            /* for vfork() */
        unsigned long policy, rt_priority;
        unsigned long it_real_value, it_prof_value, it_virt_value;
        unsigned long it_real_incr, it_prof_incr, it_virt_incr;
        struct timer_list real_timer;
        struct tms times;
        unsigned long start_time;
        long per_cpu_utime[NR_CPUS], per_cpu_stime[NR_CPUS];
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
        unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
        int swappable:1;
/* process credentials */
        uid_t uid,euid,suid,fsuid;
        gid_t gid,egid,sgid,fsgid;
        int ngroups;
        gid_t   groups[NGROUPS];
        kernel_cap_t   cap_effective, cap_inheritable, cap_permitted;
        int keep_capabilities:1;
        struct user_struct *user;
/* limits */
        struct rlimit rlim[RLIM_NLIMITS];
        unsigned short used_math;
        char comm[16];
/* file system info */
        int link_count;
        struct tty_struct *tty; /* NULL if no tty */
/* ipc stuff */
        struct sem_undo *semundo;
        struct sem_queue *semsleeping;
/* tss for this task */
        struct thread_struct tss;
/* filesystem information */
        struct fs_struct *fs;
/* open file information */
        struct files_struct *files;
/* memory management info */
        struct mm_struct *mm;
        struct list_head local_pages;
        int allocation_order, nr_local_pages;
        int fs_locks;

/* signal handlers */
        spinlock_t sigmask_lock;        /* Protects signal and blocked */
        struct signal_struct *sig;
        sigset_t signal, blocked;
        struct signal_queue *sigqueue, **sigqueue_tail;
        unsigned long sas_ss_sp;
        size_t sas_ss_size;

/* Thread group tracking */
        u32 parent_exec_id;
        u32 self_exec_id;

/* oom handling */
        int oom_kill_try;
};

/*
 * Per process flags
 */
#define PF_ALIGNWARN    0x00000001      /* Print alignment warning msgs */
                                        /* Not implemented yet, only for 486*/
#define PF_STARTING     0x00000002      /* being created */
#define PF_EXITING      0x00000004      /* getting shut down */
#define PF_FORKNOEXEC   0x00000040      /* forked but didn't exec */
#define PF_SUPERPRIV    0x00000100      /* used super-user privileges */
#define PF_DUMPCORE     0x00000200      /* dumped core */
#define PF_SIGNALED     0x00000400      /* killed by a signal */
#define PF_MEMALLOC     0x00000800      /* Allocating memory */
#define PF_VFORK        0x00001000      /* Wake up parent in mm_release */
#define PF_FREE_PAGES   0x00002000      /* The current-> */

#define PF_USEDFPU      0x00100000      /* task used FPU this quantum (SMP) */

/*
 * Ptrace flags
 */
#define PT_PTRACED      0x00000001      /* set if ptrace (0) has been called */
#define PT_TRACESYS     0x00000002      /* tracing system calls */
#define PT_DTRACE       0x00000004      /* delayed trace (used on m68k, i386) */


/*
 * Limit the stack to some sane default: root can always
 * increase this limit if needed..  8MB seems reasonable.
 */
#define _STK_LIM        (8*1024*1024)

#define DEF_PRIORITY    (20*HZ/100)     /* 200 ms time slices */

/*
 * INIT_TASK is used to set up the first task table, touch at
 * your own risk! Base=0, limit=0x1fffff (=2MB)
 */
#define INIT_TASK \
/* state etc */ { 0,0,0,KERNEL_DS,&default_exec_domain,0,0, \
/* counter */   DEF_PRIORITY,DEF_PRIORITY,0, \
/* SMP */       0,0,0,-1, \
/* schedlink */ &init_task,&init_task, &init_task, &init_task, \
/* task_exclusive */ 0, \
/* binfmt */    NULL, \
/* ec,brk... */ 0,0,0,0,0,0, \
/* pid etc.. */ 0,0,0,0,0, \
/* proc links*/ &init_task,&init_task,NULL,NULL,NULL, \
/* pidhash */   NULL, NULL, \
/* tarray */    &task[0], \
/* chld wait */ NULL, NULL, \
/* timeout */   SCHED_OTHER,0,0,0,0,0,0,0, \
/* timer */     { NULL, NULL, 0, 0, it_real_fn }, \
/* utime */     {0,0,0,0},0, \
/* per CPU times */ {0, }, {0, }, \
/* flt */       0,0,0,0,0,0, \
/* swp */       0, \
/* process credentials */                                       \
/* uid etc */   0,0,0,0,0,0,0,0,                                \
/* suppl grps*/ 0, {0,},                                        \
/* caps */      CAP_INIT_EFF_SET,CAP_INIT_INH_SET,CAP_FULL_SET, \
/* keep_caps */ 0,                                              \
/* user */      NULL,                                           \
/* rlimits */   INIT_RLIMITS, \
/* math */      0, \
/* comm */      "swapper", \
/* fs info */   0,NULL, \
/* ipc */       NULL, NULL, \
/* tss */       INIT_TSS, \
/* fs */        &init_fs, \
/* files */     &init_files, \
/* mm */        &init_mm, { &init_task.local_pages, &init_task.local_pages}, 0, 0, 0, \
/* signals */   SPIN_LOCK_UNLOCKED, &init_signals, {{0}}, {{0}}, NULL, &init_task.sigqueue, 0, 0, \
/* exec cts */  0,0, \
/* oom */       0, \
}

union task_union {
        struct task_struct task;
        unsigned long stack[2048];
};

extern union task_union init_task_union;

extern struct mm_struct init_mm;
extern struct task_struct *task[NR_TASKS];

extern struct task_struct **tarray_freelist;
extern spinlock_t taskslot_lock;

extern __inline__ void add_free_taskslot(struct task_struct **t)
{
        spin_lock(&taskslot_lock);
        *t = (struct task_struct *) tarray_freelist;
        tarray_freelist = t;
        spin_unlock(&taskslot_lock);
}

extern __inline__ struct task_struct **get_free_taskslot(void)
{
        struct task_struct **tslot;

        spin_lock(&taskslot_lock);
        if((tslot = tarray_freelist) != NULL)
                tarray_freelist = (struct task_struct **) *tslot;
        spin_unlock(&taskslot_lock);

        return tslot;
}

/* PID hashing. */
#define PIDHASH_SZ (NR_TASKS >> 2)
extern struct task_struct *pidhash[PIDHASH_SZ];

#define pid_hashfn(x)   ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))

extern __inline__ void hash_pid(struct task_struct *p)
{
        struct task_struct **htable = &pidhash[pid_hashfn(p->pid)];

        if((p->pidhash_next = *htable) != NULL)
                (*htable)->pidhash_pprev = &p->pidhash_next;
        *htable = p;
        p->pidhash_pprev = htable;
}

extern __inline__ void unhash_pid(struct task_struct *p)
{
        if(p->pidhash_next)
                p->pidhash_next->pidhash_pprev = p->pidhash_pprev;
        *p->pidhash_pprev = p->pidhash_next;
}

extern __inline__ struct task_struct *find_task_by_pid(int pid)
{
        struct task_struct *p, **htable = &pidhash[pid_hashfn(pid)];

        for(p = *htable; p && p->pid != pid; p = p->pidhash_next)
                ;

        return p;
}
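
/*
 * Typical lookup sketch: the pid hash is traversed under
 * tasklist_lock.  Note that the returned task is only guaranteed to
 * stay valid while the lock is held; the helper name is illustrative:
 */
static inline struct task_struct * example_task_for_pid(int pid)
{
        struct task_struct *p;

        read_lock(&tasklist_lock);
        p = find_task_by_pid(pid);
        read_unlock(&tasklist_lock);
        return p;
}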

/* per-UID process charging. */
extern int alloc_uid(struct task_struct *p);
void free_uid(struct task_struct *p);

#include <asm/current.h>

extern unsigned long volatile jiffies;
extern unsigned long itimer_ticks;
extern unsigned long itimer_next;
extern volatile struct timeval xtime;
extern void do_timer(struct pt_regs *);

extern unsigned int * prof_buffer;
extern unsigned long prof_len;
extern unsigned long prof_shift;

#define CURRENT_TIME (xtime.tv_sec)

extern void FASTCALL(__wake_up(struct wait_queue ** p, unsigned int mode));
extern void FASTCALL(sleep_on(struct wait_queue ** p));
extern long FASTCALL(sleep_on_timeout(struct wait_queue ** p,
                                      signed long timeout));
extern void FASTCALL(interruptible_sleep_on(struct wait_queue ** p));
extern long FASTCALL(interruptible_sleep_on_timeout(struct wait_queue ** p,
                                                    signed long timeout));
extern void FASTCALL(wake_up_process(struct task_struct * tsk));

#define wake_up(x)                      __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE | TASK_EXCLUSIVE)
#define wake_up_interruptible(x)        __wake_up((x),TASK_INTERRUPTIBLE | TASK_EXCLUSIVE)

#define __set_task_state(tsk, state_value)      do { (tsk)->state = state_value; } while (0)
#ifdef __SMP__
#define set_task_state(tsk, state_value)        do { __set_task_state(tsk, state_value); mb(); } while (0)
#else
#define set_task_state(tsk, state_value)        __set_task_state(tsk, state_value)
#endif

#define __set_current_state(state_value)        do { current->state = state_value; } while (0)
#ifdef __SMP__
#define set_current_state(state_value)          do { __set_current_state(state_value); mb(); } while (0)
#else
#define set_current_state(state_value)          __set_current_state(state_value)
#endif

extern int in_group_p(gid_t grp);
extern int in_egroup_p(gid_t grp);

extern void flush_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *);
extern int dequeue_signal(sigset_t *block, siginfo_t *);
extern int send_sig_info(int, struct siginfo *info, struct task_struct *);
extern int force_sig_info(int, struct siginfo *info, struct task_struct *);
extern int kill_pg_info(int, struct siginfo *info, pid_t);
extern int kill_sl_info(int, struct siginfo *info, pid_t);
extern int kill_proc_info(int, struct siginfo *info, pid_t);
extern int kill_something_info(int, struct siginfo *info, int);
extern void notify_parent(struct task_struct * tsk, int);
extern void force_sig(int sig, struct task_struct * p);
extern int send_sig(int sig, struct task_struct * p, int priv);
extern int kill_pg(pid_t, int, int);
extern int kill_sl(pid_t, int, int);
extern int kill_proc(pid_t, int, int);
extern int do_sigaction(int sig, const struct k_sigaction *act,
                        struct k_sigaction *oact);
extern int do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp);

extern inline int signal_pending(struct task_struct *p)
{
        return (p->sigpending != 0);
}

/* Reevaluate whether the task has signals pending delivery.
   This is required every time the blocked sigset_t changes.
   All callers should hold t->sigmask_lock.  */

static inline void recalc_sigpending(struct task_struct *t)
{
        unsigned long ready;
        long i;

        switch (_NSIG_WORDS) {
        default:
                for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
                        ready |= t->signal.sig[i] &~ t->blocked.sig[i];
                break;

        case 4: ready  = t->signal.sig[3] &~ t->blocked.sig[3];
                ready |= t->signal.sig[2] &~ t->blocked.sig[2];
                ready |= t->signal.sig[1] &~ t->blocked.sig[1];
                ready |= t->signal.sig[0] &~ t->blocked.sig[0];
                break;

        case 2: ready  = t->signal.sig[1] &~ t->blocked.sig[1];
                ready |= t->signal.sig[0] &~ t->blocked.sig[0];
                break;

        case 1: ready  = t->signal.sig[0] &~ t->blocked.sig[0];
        }

        t->sigpending = (ready != 0);
}
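
/*
 * Sketch of the caller pattern (cf. the sigprocmask paths in
 * kernel/signal.c): "blocked" is modified under sigmask_lock and
 * sigpending is then recomputed.  Illustration only:
 *
 *      spin_lock_irq(&current->sigmask_lock);
 *      sigaddset(&current->blocked, signr);
 *      recalc_sigpending(current);
 *      spin_unlock_irq(&current->sigmask_lock);
 */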

/* True if we are on the alternate signal stack.  */

static inline int on_sig_stack(unsigned long sp)
{
        return (sp >= current->sas_ss_sp
                && sp < current->sas_ss_sp + current->sas_ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
        return (current->sas_ss_size == 0 ? SS_DISABLE
                : on_sig_stack(sp) ? SS_ONSTACK : 0);
}
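
/*
 * Userspace counterpart (illustrative): an alternate stack is
 * installed with sigaltstack(2), after which SA_ONSTACK handlers
 * run on it and on_sig_stack() above reports accordingly.  Sketch
 * only, error handling omitted:
 *
 *      stack_t ss;
 *
 *      ss.ss_sp = malloc(SIGSTKSZ);
 *      ss.ss_size = SIGSTKSZ;
 *      ss.ss_flags = 0;
 *      sigaltstack(&ss, NULL);
 */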

extern int request_irq(unsigned int irq,
                       void (*handler)(int, void *, struct pt_regs *),
                       unsigned long flags,
                       const char *device,
                       void *dev_id);
extern void free_irq(unsigned int irq, void *dev_id);
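
/*
 * Classic driver usage (all names illustrative): claim an interrupt
 * line at init time, release it on unload.  Flags such as SA_SHIRQ
 * (shared line) come from asm/signal.h:
 *
 *      void my_handler(int irq, void *dev_id, struct pt_regs *regs);
 *
 *      if (request_irq(irq, my_handler, SA_SHIRQ, "mydev", dev))
 *              return -EBUSY;
 *      ...
 *      free_irq(irq, dev);
 */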

/*
 * This has now become a routine instead of a macro; it sets a flag if
 * it returns true (to do BSD-style accounting where the process is flagged
 * if it uses root privs). The implication of this is that you should do
 * normal permissions checks first, and check suser() last.
 *
 * [Dec 1997 -- Chris Evans]
 * For correctness, the above considerations need to be extended to
 * fsuser(). This is done, along with moving fsuser() checks to be
 * last.
 *
 * These will be removed, but in the meantime, when the SECURE_NOROOT
 * flag is set, uids don't grant privilege.
 */
extern inline int suser(void)
{
        if (!issecure(SECURE_NOROOT) && current->euid == 0) {
                current->flags |= PF_SUPERPRIV;
                return 1;
        }
        return 0;
}

extern inline int fsuser(void)
{
        if (!issecure(SECURE_NOROOT) && current->fsuid == 0) {
                current->flags |= PF_SUPERPRIV;
                return 1;
        }
        return 0;
}

/*
 * capable() checks for a particular capability.
 * New privilege checks should use this interface, rather than suser() or
 * fsuser(). See include/linux/capability.h for defined capabilities.
 */

extern inline int capable(int cap)
{
#if 1 /* ok now */
        if (cap_raised(current->cap_effective, cap))
#else
        if (cap_is_fs_cap(cap) ? current->fsuid == 0 : current->euid == 0)
#endif
        {
                current->flags |= PF_SUPERPRIV;
                return 1;
        }
        return 0;
}
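
/*
 * Example of the preferred style (hypothetical context): instead of
 *
 *      if (!suser())
 *              return -EPERM;
 *
 * a capability-aware check reads:
 *
 *      if (!capable(CAP_SYS_ADMIN))
 *              return -EPERM;
 *
 * where CAP_SYS_ADMIN is one of the capabilities defined in
 * include/linux/capability.h.
 */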

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);
static inline void mmget(struct mm_struct * mm)
{
        atomic_inc(&mm->count);
}
extern void mmput(struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(void);
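
/*
 * Reference counting sketch: code that wants to keep an mm alive
 * takes a reference with mmget() and drops it with mmput(), which
 * frees the mm_struct once count reaches zero.  Illustration only;
 * the caller must guarantee the mm is still live when the reference
 * is taken:
 *
 *      struct mm_struct *mm = p->mm;
 *
 *      mmget(mm);
 *      ... use mm ...
 *      mmput(mm);
 */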

/*
 * Routines for handling the fd arrays
 */
extern struct file ** alloc_fd_array(int);
extern int expand_fd_array(struct files_struct *, int nr);
extern void free_fd_array(struct file **, int);

extern fd_set *alloc_fdset(int);
extern int expand_fdset(struct files_struct *, int nr);
extern void free_fdset(fd_set *, int);

/* Expand files.  Return <0 on error; 0 if nothing was done; 1 if the
 * files were expanded, in which case we may have blocked. */
static inline int expand_files(struct files_struct *files, int nr)
{
        int err, expand = 0;
#ifdef FDSET_DEBUG
        printk (KERN_ERR __FUNCTION__ " %d: nr = %d\n", current->pid, nr);
#endif

        if (nr >= files->max_fdset) {
                expand = 1;
                if ((err = expand_fdset(files, nr + 1)))
                        goto out;
        }
        if (nr >= files->max_fds) {
                expand = 1;
                if ((err = expand_fd_array(files, nr + 1)))
                        goto out;
        }
        err = expand;
 out:
#ifdef FDSET_DEBUG
        if (err)
                printk (KERN_ERR __FUNCTION__ " %d: return %d\n", current->pid, err);
#endif
        return err;
}

extern int  copy_thread(int, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_mm(struct task_struct *);
extern void exit_fs(struct task_struct *);
extern void exit_files(struct task_struct *);
extern void exit_sighand(struct task_struct *);

extern void daemonize(void);

extern int do_execve(char *, char **, char **, struct pt_regs *);
extern int do_fork(unsigned long, unsigned long, struct pt_regs *);

/*
 * The wait-queues are circular lists, and you have to be *very* sure
 * to keep them correct. Use only these two functions to add/remove
 * entries in the queues.
 */
extern inline void __add_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
{
        wait->next = *p ? : WAIT_QUEUE_HEAD(p);
        *p = wait;
}

extern rwlock_t waitqueue_lock;

extern inline void add_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
{
        unsigned long flags;

        write_lock_irqsave(&waitqueue_lock, flags);
        __add_wait_queue(p, wait);
        write_unlock_irqrestore(&waitqueue_lock, flags);
}

extern inline void __remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
{
        struct wait_queue * next = wait->next;
        struct wait_queue * head = next;
        struct wait_queue * tmp;

        while ((tmp = head->next) != wait) {
                head = tmp;
        }
        head->next = next;
}

extern inline void remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
{
        unsigned long flags;

        write_lock_irqsave(&waitqueue_lock, flags);
        __remove_wait_queue(p, wait);
        write_unlock_irqrestore(&waitqueue_lock, flags);
}

#define __wait_event(wq, condition)                                     \
do {                                                                    \
        struct wait_queue __wait;                                       \
                                                                        \
        __wait.task = current;                                          \
        add_wait_queue(&wq, &__wait);                                   \
        for (;;) {                                                      \
                current->state = TASK_UNINTERRUPTIBLE;                  \
                mb();                                                   \
                if (condition)                                          \
                        break;                                          \
                schedule();                                             \
        }                                                               \
        current->state = TASK_RUNNING;                                  \
        remove_wait_queue(&wq, &__wait);                                \
} while (0)

#define wait_event(wq, condition)                                       \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event(wq, condition);                                    \
} while (0)

#define __wait_event_interruptible(wq, condition, ret)                  \
do {                                                                    \
        struct wait_queue __wait;                                       \
                                                                        \
        __wait.task = current;                                          \
        add_wait_queue(&wq, &__wait);                                   \
        for (;;) {                                                      \
                current->state = TASK_INTERRUPTIBLE;                    \
                mb();                                                   \
                if (condition)                                          \
                        break;                                          \
                if (!signal_pending(current)) {                         \
                        schedule();                                     \
                        continue;                                       \
                }                                                       \
                ret = -ERESTARTSYS;                                     \
                break;                                                  \
        }                                                               \
        current->state = TASK_RUNNING;                                  \
        remove_wait_queue(&wq, &__wait);                                \
} while (0)

#define wait_event_interruptible(wq, condition)                         \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __wait_event_interruptible(wq, condition, __ret);       \
        __ret;                                                          \
})
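
/*
 * Producer/consumer sketch tying the macros above to the wake_up
 * side (illustrative names; "queue" is a struct wait_queue *
 * initialised to NULL, "done" a shared flag):
 *
 *      sleeper:
 *              if (wait_event_interruptible(queue, done))
 *                      return -ERESTARTSYS;
 *
 *      waker:
 *              done = 1;
 *              wake_up_interruptible(&queue);
 */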

#define REMOVE_LINKS(p) do { \
        (p)->next_task->prev_task = (p)->prev_task; \
        (p)->prev_task->next_task = (p)->next_task; \
        if ((p)->p_osptr) \
                (p)->p_osptr->p_ysptr = (p)->p_ysptr; \
        if ((p)->p_ysptr) \
                (p)->p_ysptr->p_osptr = (p)->p_osptr; \
        else \
                (p)->p_pptr->p_cptr = (p)->p_osptr; \
        } while (0)

#define SET_LINKS(p) do { \
        (p)->next_task = &init_task; \
        (p)->prev_task = init_task.prev_task; \
        init_task.prev_task->next_task = (p); \
        init_task.prev_task = (p); \
        (p)->p_ysptr = NULL; \
        if (((p)->p_osptr = (p)->p_pptr->p_cptr) != NULL) \
                (p)->p_osptr->p_ysptr = p; \
        (p)->p_pptr->p_cptr = p; \
        } while (0)

#define for_each_task(p) \
        for (p = &init_task ; (p = p->next_task) != &init_task ; )
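
/*
 * Traversal sketch: for_each_task() walks the circular next_task
 * list starting after init_task, so every task except init_task is
 * visited.  Readers hold tasklist_lock:
 *
 *      struct task_struct *p;
 *
 *      read_lock(&tasklist_lock);
 *      for_each_task(p) {
 *              if (p->pid == target_pid)
 *                      break;
 *      }
 *      read_unlock(&tasklist_lock);
 */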

#endif /* __KERNEL__ */

#endif
